Mirror of https://github.com/NousResearch/hermes-agent.git (synced 2026-05-08 11:47:09 +08:00)

Compare commits: 1 commit, bb/gui ... hermes/her
| Author | SHA1 | Date |
|---|---|---|
|  | 9e236df3f8 |  |
@@ -5,15 +5,6 @@
# Dependencies
node_modules
**/node_modules
.venv
**/.venv

# Built artifacts that are regenerated inside the image. Excluded so local
# rebuilds on the developer's machine don't invalidate the npm-install layer
# that now depends on the full ui-tui/packages/hermes-ink/ tree being present.
ui-tui/dist/
ui-tui/packages/hermes-ink/dist/

# CI/CD
.github
@@ -22,10 +13,3 @@ ui-tui/packages/hermes-ink/dist/
.env

*.md

# Runtime data (bind-mounted at /opt/data; must not leak into build context)
data/

# Compose/profile runtime state (bind-mounted; avoid ownership/secret issues)
hermes-config/
runtime/
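Aside (not from the diff): the ignore rules above keep the bind-mounted runtime directories out of the build context. A minimal compose sketch of how those mounts might be wired at runtime; the service name and env_file wiring are assumptions, while the image name and the /opt/data path appear elsewhere in this diff.

```yaml
# Hypothetical sketch; only the data/ -> /opt/data mount is documented above,
# the service name and env_file wiring are illustrative assumptions.
services:
  hermes:
    image: nousresearch/hermes-agent:latest
    env_file: .env              # keys as documented in .env.example below
    volumes:
      - ./data:/opt/data        # runtime data, bind-mounted at /opt/data
```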
.env.example (103 changed lines)
@@ -14,25 +14,6 @@
# LLM_MODEL is no longer read from .env — this line is kept for reference only.
# LLM_MODEL=anthropic/claude-opus-4.6

# =============================================================================
# LLM PROVIDER (Google AI Studio / Gemini)
# =============================================================================
# Native Gemini API via Google's OpenAI-compatible endpoint.
# Get your key at: https://aistudio.google.com/app/apikey
# GOOGLE_API_KEY=your_google_ai_studio_key_here
# GEMINI_API_KEY=your_gemini_key_here # alias for GOOGLE_API_KEY
# Optional base URL override (default: Google's OpenAI-compatible endpoint)
# GEMINI_BASE_URL=https://generativelanguage.googleapis.com/v1beta/openai

# =============================================================================
# LLM PROVIDER (Ollama Cloud)
# =============================================================================
# Cloud-hosted open models via Ollama's OpenAI-compatible endpoint.
# Get your key at: https://ollama.com/settings
# OLLAMA_API_KEY=your_ollama_key_here
# Optional base URL override (default: https://ollama.com/v1)
# OLLAMA_BASE_URL=https://ollama.com/v1

# =============================================================================
# LLM PROVIDER (z.ai / GLM)
# =============================================================================
@@ -52,15 +33,6 @@
# KIMI_BASE_URL=https://api.kimi.com/coding/v1 # Default for sk-kimi- keys
# KIMI_BASE_URL=https://api.moonshot.ai/v1 # For legacy Moonshot keys
# KIMI_BASE_URL=https://api.moonshot.cn/v1 # For Moonshot China keys
# KIMI_CN_API_KEY= # Dedicated Moonshot China key

# =============================================================================
# LLM PROVIDER (Arcee AI)
# =============================================================================
# Arcee AI provides access to Trinity models (trinity-mini, trinity-large-*)
# Get an Arcee key at: https://chat.arcee.ai/
# ARCEEAI_API_KEY=
# ARCEE_BASE_URL= # Override default base URL

# =============================================================================
# LLM PROVIDER (MiniMax)
@@ -99,23 +71,6 @@
# HF_TOKEN=
# OPENCODE_GO_BASE_URL=https://opencode.ai/zen/go/v1 # Override default base URL

# =============================================================================
# LLM PROVIDER (Qwen OAuth)
# =============================================================================
# Qwen OAuth reuses your local Qwen CLI login (qwen auth qwen-oauth).
# No API key needed — credentials come from ~/.qwen/oauth_creds.json.
# Optional base URL override:
# HERMES_QWEN_BASE_URL=https://portal.qwen.ai/v1

# =============================================================================
# LLM PROVIDER (Xiaomi MiMo)
# =============================================================================
# Xiaomi MiMo models (mimo-v2-pro, mimo-v2-omni, mimo-v2-flash).
# Get your key at: https://platform.xiaomimimo.com
# XIAOMI_API_KEY=your_key_here
# Optional base URL override:
# XIAOMI_BASE_URL=https://api.xiaomimimo.com/v1

# =============================================================================
# TOOL API KEYS
# =============================================================================
@@ -154,10 +109,6 @@
# Only override here if you need to force a backend without touching config.yaml:
# TERMINAL_ENV=local

# Override the container runtime binary (e.g. to use Podman instead of Docker).
# Useful on systems where Docker's storage driver is broken or unavailable.
# HERMES_DOCKER_BINARY=/usr/local/bin/podman

# Container images (for singularity/docker/modal backends)
# TERMINAL_DOCKER_IMAGE=nikolaik/python-nodejs:python3.11-nodejs20
# TERMINAL_SINGULARITY_IMAGE=docker://nikolaik/python-nodejs:python3.11-nodejs20
@@ -244,15 +195,6 @@ BROWSERBASE_PROXIES=true
# Uses custom Chromium build to avoid bot detection altogether
BROWSERBASE_ADVANCED_STEALTH=false

# Browser engine for local mode (default: auto = Chrome)
# "auto" — use Chrome (don't pass --engine flag)
# "lightpanda" — use Lightpanda (1.3-5.8x faster navigation, no screenshots)
# "chrome" — explicitly request Chrome
# Requires agent-browser v0.25.3+. Lightpanda commands that fail or return
# empty results are automatically retried with Chrome.
# Also configurable via browser.engine in config.yaml.
# AGENT_BROWSER_ENGINE=auto

# Browser session timeout in seconds (default: 300)
# Sessions are cleaned up after this duration of inactivity
BROWSER_SESSION_TIMEOUT=300
@@ -393,9 +335,9 @@ IMAGE_TOOLS_DEBUG=false
# Default STT provider is "local" (faster-whisper) — runs on your machine, no API key needed.
# Install with: pip install faster-whisper
# Model downloads automatically on first use (~150 MB for "base").
# To use cloud providers instead, set GROQ_API_KEY, VOICE_TOOLS_OPENAI_KEY, or ELEVENLABS_API_KEY above.
# Provider priority: local > groq > openai > mistral > xai > elevenlabs
# Configure in config.yaml: stt.provider: local | groq | openai | mistral | xai | elevenlabs
# To use cloud providers instead, set GROQ_API_KEY or VOICE_TOOLS_OPENAI_KEY above.
# Provider priority: local > groq > openai
# Configure in config.yaml: stt.provider: local | groq | openai

# =============================================================================
# STT ADVANCED OVERRIDES (optional)
@@ -403,46 +345,7 @@ IMAGE_TOOLS_DEBUG=false
# Override default STT models per provider (normally set via stt.model in config.yaml)
# STT_GROQ_MODEL=whisper-large-v3-turbo
# STT_OPENAI_MODEL=whisper-1
# STT_ELEVENLABS_MODEL=scribe_v2

# Override STT provider endpoints (for proxies or self-hosted instances)
# GROQ_BASE_URL=https://api.groq.com/openai/v1
# STT_OPENAI_BASE_URL=https://api.openai.com/v1
# ELEVENLABS_STT_BASE_URL=https://api.elevenlabs.io/v1

# =============================================================================
# MICROSOFT TEAMS INTEGRATION
# =============================================================================
# Register a Bot in Azure: https://dev.botframework.com/ → "Register a bot"
# Or use Azure Portal: Azure Active Directory → App registrations → New registration
# Then add the bot to Teams via the Bot Framework or App Studio.
#
# TEAMS_CLIENT_ID= # Azure AD App (client) ID
# TEAMS_CLIENT_SECRET= # Azure AD client secret value
# TEAMS_TENANT_ID= # Azure AD tenant ID (or "common" for multi-tenant)
# TEAMS_ALLOWED_USERS= # Comma-separated AAD object IDs or UPNs
# TEAMS_ALLOW_ALL_USERS=false # Set true to skip the allowlist
# TEAMS_HOME_CHANNEL= # Default channel/chat ID for cron delivery
# TEAMS_HOME_CHANNEL_NAME= # Display name for the home channel
# TEAMS_PORT=3978 # Webhook listen port (Bot Framework default)

# =============================================================================
# GOOGLE CHAT INTEGRATION
# =============================================================================
# Connects via Cloud Pub/Sub pull subscription (no public URL required).
# Setup walkthrough: website/docs/user-guide/messaging/google_chat.md.
# 1. Create a GCP project, enable the Google Chat API and Cloud Pub/Sub.
# 2. Create a Service Account with roles/pubsub.subscriber on the
# subscription (NOT project-wide); download the JSON key.
# 3. Configure your Chat app at console.cloud.google.com/apis/credentials
# → Google Chat API → Configuration → Cloud Pub/Sub topic.
# 4. (Optional, for native attachment delivery) Each user runs
# `/setup-files` once in their own DM after Pub/Sub is wired up.
#
# GOOGLE_CHAT_PROJECT_ID= # GCP project hosting the topic (or set GOOGLE_CLOUD_PROJECT)
# GOOGLE_CHAT_SUBSCRIPTION_NAME= # Full path: projects/<id>/subscriptions/<name>
# GOOGLE_CHAT_SERVICE_ACCOUNT_JSON= # Path to SA JSON (or set GOOGLE_APPLICATION_CREDENTIALS)
# GOOGLE_CHAT_ALLOWED_USERS= # Comma-separated emails allowed to talk to the bot
# GOOGLE_CHAT_ALLOW_ALL_USERS=false # Set true to skip the allowlist
# GOOGLE_CHAT_HOME_CHANNEL= # Default space (spaces/XXXX) for cron delivery
# GOOGLE_CHAT_HOME_CHANNEL_NAME= # Display name for the home channel
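Aside (not from the diff): the STT comments in .env.example above defer provider selection to config.yaml. A minimal sketch of what that block could look like, assuming only the keys named in the comments (stt.provider, stt.model); the surrounding file layout is an assumption.

```yaml
# Hypothetical config.yaml excerpt; key names come from the .env.example comments above.
stt:
  provider: local   # local | groq | openai
  model: base       # faster-whisper model, downloaded on first use (~150 MB)
```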
.envrc (4 changed lines)
@@ -1,5 +1 @@
watch_file pyproject.toml uv.lock
watch_file ui-tui/package-lock.json ui-tui/package.json
watch_file flake.nix flake.lock nix/devShell.nix nix/tui.nix nix/package.nix nix/python.nix

use flake
.gitattributes (2 changed lines, vendored)
@@ -1,2 +0,0 @@
# Auto-generated files — collapse diffs and exclude from language stats
web/package-lock.json linguist-generated=true
.github/ISSUE_TEMPLATE/bug_report.yml (30 changed lines, vendored)
@@ -11,7 +11,6 @@ body:
**Before submitting**, please:
- [ ] Search [existing issues](https://github.com/NousResearch/hermes-agent/issues) to avoid duplicates
- [ ] Update to the latest version (`hermes update`) and confirm the bug still exists
- [ ] Run `hermes debug share` and paste the links below (see Debug Report section)

- type: textarea
id: description
@@ -83,25 +82,6 @@ body:
- Slack
- WhatsApp

- type: textarea
id: debug-report
attributes:
label: Debug Report
description: |
Run `hermes debug share` from your terminal and paste the links it prints here.
This uploads your system info, config, and recent logs to a paste service automatically.

If you're in an interactive chat session, you can also use the `/debug` slash command — it does the same thing.

If the upload fails, run `hermes debug share --local` and paste the output directly.
placeholder: |
Report https://paste.rs/abc123
agent.log https://paste.rs/def456
gateway.log https://paste.rs/ghi789
render: shell
validations:
required: true

- type: input
id: os
attributes:
@@ -117,6 +97,8 @@ body:
label: Python Version
description: Output of `python --version`
placeholder: "3.11.9"
validations:
required: true

- type: input
id: hermes-version
@@ -124,14 +106,14 @@ body:
label: Hermes Version
description: Output of `hermes version`
placeholder: "2.1.0"
validations:
required: true

- type: textarea
id: logs
attributes:
label: Additional Logs / Traceback (optional)
description: |
The debug report above covers most logs. Use this field for any extra error output,
tracebacks, or screenshots not captured by `hermes debug share`.
label: Relevant Logs / Traceback
description: Paste any error output, traceback, or log messages. This will be auto-formatted as code.
render: shell

- type: textarea
.github/ISSUE_TEMPLATE/feature_request.yml (12 changed lines, vendored)
@@ -71,15 +71,3 @@ body:
label: Contribution
options:
- label: I'd like to implement this myself and submit a PR

- type: textarea
id: debug-report
attributes:
label: Debug Report (optional)
description: |
If this feature request is related to a problem you're experiencing, run `hermes debug share` and paste the links here.
In an interactive chat session, you can use `/debug` instead.
This helps us understand your environment and any related logs.
placeholder: |
Report https://paste.rs/abc123
render: shell
.github/ISSUE_TEMPLATE/setup_help.yml (20 changed lines, vendored)
@@ -9,8 +9,7 @@ body:
Sorry you're having trouble! Please fill out the details below so we can help.

**Quick checks first:**
- Run `hermes debug share` and paste the links in the Debug Report section below
- If you're in a chat session, you can use `/debug` instead — it does the same thing
- Run `hermes doctor` and include the output below
- Try `hermes update` to get the latest version
- Check the [README troubleshooting section](https://github.com/NousResearch/hermes-agent#troubleshooting)
- For general questions, consider the [Nous Research Discord](https://discord.gg/NousResearch) for faster help
@@ -75,21 +74,10 @@ body:
placeholder: "2.1.0"

- type: textarea
id: debug-report
id: doctor-output
attributes:
label: Debug Report
description: |
Run `hermes debug share` from your terminal and paste the links it prints here.
This uploads your system info, config, and recent logs to a paste service automatically.

If you're in an interactive chat session, you can also use the `/debug` slash command — it does the same thing.

If the upload fails or install didn't get that far, run `hermes debug share --local` and paste the output directly.
If even that doesn't work, run `hermes doctor` and paste that output instead.
placeholder: |
Report https://paste.rs/abc123
agent.log https://paste.rs/def456
gateway.log https://paste.rs/ghi789
label: Output of `hermes doctor`
description: Run `hermes doctor` and paste the full output. This will be auto-formatted.
render: shell

- type: textarea
.github/actions/nix-setup/action.yml (18 changed lines, vendored)
@@ -1,18 +0,0 @@
name: 'Setup Nix'
description: 'Install Nix and configure Cachix binary cache'

inputs:
cachix-auth-token:
description: 'Cachix auth token (enables push). Omit for read-only.'
required: false
default: ''

runs:
using: composite
steps:
- uses: DeterminateSystems/nix-installer-action@ef8a148080ab6020fd15196c2084a2eea5ff2d25 # v22
- uses: cachix/cachix-action@1eb2ef646ac0255473d23a5907ad7b04ce94065c # v17
with:
name: hermes-agent
authToken: ${{ inputs.cachix-auth-token }}
continue-on-error: true
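Aside (not from the diff): this composite action is consumed by other workflows in this repository; a usage sketch of the call site, mirroring the invocation in nix-lockfile-fix.yml further down (which is also removed in this commit).

```yaml
# Sketch of a call site; mirrors the invocation in .github/workflows/nix-lockfile-fix.yml below.
- uses: ./.github/actions/nix-setup
  with:
    cachix-auth-token: ${{ secrets.CACHIX_AUTH_TOKEN }}
```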
.github/dependabot.yml (44 changed lines, vendored)
@@ -1,44 +0,0 @@
# Dependabot configuration for hermes-agent.
#
# Deliberately scoped to github-actions only.
#
# We do NOT enable Dependabot for pip / npm / any source-dependency ecosystem
# because we pin source dependencies exactly (uv.lock, package-lock.json) as
# part of our supply-chain posture. Automatic version-bump PRs against those
# pins would undermine the strategy — pins are moved deliberately, after
# review, not on a schedule.
#
# github-actions is the exception: action pins (we use full commit SHAs per
# supply-chain policy) must be updated when upstream actions publish
# patches — usually themselves security fixes. Dependabot opens a PR with
# the new SHA and release notes; we review and merge like any other PR.
#
# Security-update PRs for source dependencies (opened ONLY when a CVE is
# published affecting a currently-pinned version) are enabled separately
# via the repo's Dependabot security updates setting
# (Settings → Code security → Dependabot → Dependabot security updates).
# Those are CVE-only, not schedule-driven, and do not conflict with our
# pinning strategy — they fire when a pinned version becomes known-bad,
# which is exactly when we want to move the pin.

version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
day: "monday"
open-pull-requests-limit: 5
labels:
- "dependencies"
- "github-actions"
commit-message:
prefix: "chore(actions)"
include: "scope"
groups:
# Batch routine action bumps into one PR per week to reduce noise.
# Security updates still open individually and bypass grouping.
actions-minor-patch:
update-types:
- "minor"
- "patch"
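Aside (not from the diff): the SHA-pinning policy described above is the pattern used throughout these workflows, for example the pinned checkout step that appears below.

```yaml
# Pinned to a full commit SHA; the trailing comment records the released tag.
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
```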
.github/workflows/contributor-check.yml (73 changed lines, vendored)
@@ -1,73 +0,0 @@
name: Contributor Attribution Check

on:
pull_request:
branches: [main]
paths:
# Only run when code files change (not docs-only PRs)
- '*.py'
- '**/*.py'
- '.github/workflows/contributor-check.yml'

permissions:
contents: read

jobs:
check-attribution:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
fetch-depth: 0 # Full history needed for git log

- name: Check for unmapped contributor emails
run: |
# Get the merge base between this PR and main
MERGE_BASE=$(git merge-base origin/main HEAD)

# Find any new author emails in this PR's commits
NEW_EMAILS=$(git log ${MERGE_BASE}..HEAD --format='%ae' --no-merges | sort -u)

if [ -z "$NEW_EMAILS" ]; then
echo "No new commits to check."
exit 0
fi

# Check each email against AUTHOR_MAP in release.py
MISSING=""
while IFS= read -r email; do
# Skip teknium and bot emails
case "$email" in
*teknium*|*noreply@github.com*|*dependabot*|*github-actions*|*anthropic.com*|*cursor.com*)
continue ;;
esac

# Check if email is in AUTHOR_MAP (either as a key or matches noreply pattern)
if echo "$email" | grep -qP '\+.*@users\.noreply\.github\.com'; then
continue # GitHub noreply emails auto-resolve
fi

if ! grep -qF "\"${email}\"" scripts/release.py 2>/dev/null; then
AUTHOR=$(git log --author="$email" --format='%an' -1)
MISSING="${MISSING}\n ${email} (${AUTHOR})"
fi
done <<< "$NEW_EMAILS"

if [ -n "$MISSING" ]; then
echo ""
echo "⚠️ New contributor email(s) not in AUTHOR_MAP:"
echo -e "$MISSING"
echo ""
echo "Please add mappings to scripts/release.py AUTHOR_MAP:"
echo -e "$MISSING" | while read -r line; do
email=$(echo "$line" | sed 's/^ *//' | cut -d' ' -f1)
[ -z "$email" ] && continue
echo " \"${email}\": \"<github-username>\","
done
echo ""
echo "To find the GitHub username for an email:"
echo " gh api 'search/users?q=EMAIL+in:email' --jq '.items[0].login'"
exit 1
else
echo "✅ All contributor emails are mapped in AUTHOR_MAP."
fi
.github/workflows/deploy-site.yml (59 changed lines, vendored)
@@ -1,14 +1,11 @@
name: Deploy Site

on:
release:
types: [published]
push:
branches: [main]
paths:
- 'website/**'
- 'skills/**'
- 'optional-skills/**'
- 'landingpage/**'
- '.github/workflows/deploy-site.yml'
workflow_dispatch:

@@ -21,49 +18,22 @@ concurrency:
cancel-in-progress: false

jobs:
deploy-vercel:
if: github.event_name == 'release'
runs-on: ubuntu-latest
steps:
- name: Trigger Vercel Deploy
run: curl -X POST "${{ secrets.VERCEL_DEPLOY_HOOK }}"

deploy-docs:
build-and-deploy:
# Only run on the upstream repository, not on forks
if: github.repository == 'NousResearch/hermes-agent'
runs-on: ubuntu-latest
environment:
name: github-pages
url: ${{ steps.deploy.outputs.page_url }}
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- uses: actions/checkout@v4

- uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
- uses: actions/setup-node@v4
with:
node-version: 20
cache: npm
cache-dependency-path: website/package-lock.json

- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5
with:
python-version: '3.11'

- name: Install PyYAML for skill extraction
run: pip install pyyaml==6.0.2 httpx==0.28.1

- name: Extract skill metadata for dashboard
run: python3 website/scripts/extract-skills.py

- name: Regenerate per-skill docs pages + catalogs
run: python3 website/scripts/generate-skill-docs.py

- name: Build skills index (if not already present)
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
if [ ! -f website/static/api/skills-index.json ]; then
python3 scripts/build_skills_index.py || echo "Skills index build failed (non-fatal)"
fi

- name: Install dependencies
run: npm ci
working-directory: website
@@ -75,23 +45,18 @@ jobs:
- name: Stage deployment
run: |
mkdir -p _site/docs
# Landing page at root
cp -r landingpage/* _site/
# Docusaurus at /docs/
cp -r website/build/* _site/docs/
# llms.txt / llms-full.txt are also published at the site root
# (https://hermes-agent.nousresearch.com/llms.txt) because some
# agents and IDE plugins probe the classic root-level path rather
# than /docs/llms.txt. Same file, two URLs, one source of truth.
if [ -f website/build/llms.txt ]; then
cp website/build/llms.txt _site/llms.txt
fi
if [ -f website/build/llms-full.txt ]; then
cp website/build/llms-full.txt _site/llms-full.txt
fi
# CNAME so GitHub Pages keeps the custom domain between deploys
echo "hermes-agent.nousresearch.com" > _site/CNAME

- name: Upload artifact
uses: actions/upload-pages-artifact@56afc609e74202658d3ffba0e8f6dda462b719fa # v3
uses: actions/upload-pages-artifact@v3
with:
path: _site

- name: Deploy to GitHub Pages
id: deploy
uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # v4
uses: actions/deploy-pages@v4
.github/workflows/desktop-release.yml (343 changed lines, vendored)
@@ -1,343 +0,0 @@
|
||||
name: Desktop Release
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
release:
|
||||
types: [published]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
channel:
|
||||
description: Release channel to build
|
||||
required: true
|
||||
default: nightly
|
||||
type: choice
|
||||
options:
|
||||
- nightly
|
||||
- stable
|
||||
release_tag:
|
||||
description: "Required when channel=stable (example: v2026.5.5)"
|
||||
required: false
|
||||
type: string
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
concurrency:
|
||||
group: desktop-release-${{ github.ref }}
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
prepare:
|
||||
if: github.repository == 'NousResearch/hermes-agent'
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
channel: ${{ steps.meta.outputs.channel }}
|
||||
release_name: ${{ steps.meta.outputs.release_name }}
|
||||
release_tag: ${{ steps.meta.outputs.release_tag }}
|
||||
version: ${{ steps.meta.outputs.version }}
|
||||
is_stable: ${{ steps.meta.outputs.is_stable }}
|
||||
steps:
|
||||
- id: meta
|
||||
env:
|
||||
EVENT_NAME: ${{ github.event_name }}
|
||||
INPUT_CHANNEL: ${{ github.event.inputs.channel }}
|
||||
INPUT_RELEASE_TAG: ${{ github.event.inputs.release_tag }}
|
||||
RELEASE_TAG_FROM_EVENT: ${{ github.event.release.tag_name }}
|
||||
GITHUB_SHA: ${{ github.sha }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
channel="nightly"
|
||||
release_tag="desktop-nightly"
|
||||
is_stable="false"
|
||||
|
||||
if [[ "$EVENT_NAME" == "release" ]]; then
|
||||
channel="stable"
|
||||
release_tag="$RELEASE_TAG_FROM_EVENT"
|
||||
is_stable="true"
|
||||
elif [[ "$EVENT_NAME" == "workflow_dispatch" && "$INPUT_CHANNEL" == "stable" ]]; then
|
||||
channel="stable"
|
||||
release_tag="$INPUT_RELEASE_TAG"
|
||||
is_stable="true"
|
||||
fi
|
||||
|
||||
if [[ "$channel" == "stable" ]]; then
|
||||
if [[ -z "$release_tag" ]]; then
|
||||
echo "Stable desktop releases require a release tag." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
version="${release_tag#v}"
|
||||
release_name="Hermes Desktop ${release_tag}"
|
||||
else
|
||||
stamp="$(date -u +%Y%m%d)"
|
||||
short_sha="${GITHUB_SHA::7}"
|
||||
version="0.0.0-nightly.${stamp}.${short_sha}"
|
||||
release_name="Hermes Desktop Nightly ${stamp}-${short_sha}"
|
||||
fi
|
||||
|
||||
{
|
||||
echo "channel=$channel"
|
||||
echo "release_name=$release_name"
|
||||
echo "release_tag=$release_tag"
|
||||
echo "version=$version"
|
||||
echo "is_stable=$is_stable"
|
||||
} >> "$GITHUB_OUTPUT"
|
||||
|
||||
build:
|
||||
if: github.repository == 'NousResearch/hermes-agent'
|
||||
needs: prepare
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- platform: mac
|
||||
runner: macos-latest
|
||||
build_args: --mac dmg zip
|
||||
- platform: win
|
||||
runner: windows-latest
|
||||
build_args: --win nsis msi
|
||||
runs-on: ${{ matrix.runner }}
|
||||
env:
|
||||
DESKTOP_CHANNEL: ${{ needs.prepare.outputs.channel }}
|
||||
DESKTOP_VERSION: ${{ needs.prepare.outputs.version }}
|
||||
MAC_CSC_LINK: ${{ secrets.CSC_LINK }}
|
||||
MAC_CSC_KEY_PASSWORD: ${{ secrets.CSC_KEY_PASSWORD }}
|
||||
APPLE_API_KEY: ${{ secrets.APPLE_API_KEY }}
|
||||
APPLE_API_KEY_ID: ${{ secrets.APPLE_API_KEY_ID }}
|
||||
APPLE_API_ISSUER: ${{ secrets.APPLE_API_ISSUER }}
|
||||
WIN_CSC_LINK: ${{ secrets.WIN_CSC_LINK }}
|
||||
WIN_CSC_KEY_PASSWORD: ${{ secrets.WIN_CSC_KEY_PASSWORD }}
|
||||
steps:
|
||||
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
|
||||
- uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
|
||||
with:
|
||||
node-version: 20
|
||||
cache: npm
|
||||
cache-dependency-path: package-lock.json
|
||||
|
||||
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5
|
||||
with:
|
||||
python-version: "3.11"
|
||||
|
||||
- name: Enforce signing gates for stable releases
|
||||
if: needs.prepare.outputs.is_stable == 'true'
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
missing=()
|
||||
|
||||
if [[ "${{ matrix.platform }}" == "mac" ]]; then
|
||||
[[ -z "${MAC_CSC_LINK:-}" ]] && missing+=("CSC_LINK")
|
||||
[[ -z "${MAC_CSC_KEY_PASSWORD:-}" ]] && missing+=("CSC_KEY_PASSWORD")
|
||||
[[ -z "${APPLE_API_KEY:-}" ]] && missing+=("APPLE_API_KEY")
|
||||
[[ -z "${APPLE_API_KEY_ID:-}" ]] && missing+=("APPLE_API_KEY_ID")
|
||||
[[ -z "${APPLE_API_ISSUER:-}" ]] && missing+=("APPLE_API_ISSUER")
|
||||
else
|
||||
[[ -z "${WIN_CSC_LINK:-}" ]] && missing+=("WIN_CSC_LINK")
|
||||
[[ -z "${WIN_CSC_KEY_PASSWORD:-}" ]] && missing+=("WIN_CSC_KEY_PASSWORD")
|
||||
fi
|
||||
|
||||
if (( ${#missing[@]} > 0 )); then
|
||||
echo "::error::Stable desktop release missing required secrets: ${missing[*]}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Install workspace dependencies
|
||||
run: npm ci
|
||||
|
||||
- name: Build bundled TUI payload
|
||||
run: npm --prefix ui-tui run build
|
||||
|
||||
- name: Build desktop renderer
|
||||
run: npm --prefix apps/desktop run build
|
||||
|
||||
- name: Stage Hermes payload
|
||||
run: npm --prefix apps/desktop run stage:hermes
|
||||
|
||||
- name: Map macOS signing credentials
|
||||
if: matrix.platform == 'mac'
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
has_link=0
|
||||
has_pass=0
|
||||
[[ -n "${MAC_CSC_LINK:-}" ]] && has_link=1
|
||||
[[ -n "${MAC_CSC_KEY_PASSWORD:-}" ]] && has_pass=1
|
||||
|
||||
if [[ $has_link -eq 1 && $has_pass -eq 1 ]]; then
|
||||
echo "CSC_LINK=${MAC_CSC_LINK}" >> "$GITHUB_ENV"
|
||||
echo "CSC_KEY_PASSWORD=${MAC_CSC_KEY_PASSWORD}" >> "$GITHUB_ENV"
|
||||
elif [[ $has_link -eq 1 || $has_pass -eq 1 ]]; then
|
||||
echo "::error::macOS signing secrets are partially configured. Set both CSC_LINK and CSC_KEY_PASSWORD."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Map Windows signing credentials
|
||||
if: matrix.platform == 'win'
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
has_link=0
|
||||
has_pass=0
|
||||
[[ -n "${WIN_CSC_LINK:-}" ]] && has_link=1
|
||||
[[ -n "${WIN_CSC_KEY_PASSWORD:-}" ]] && has_pass=1
|
||||
|
||||
if [[ $has_link -eq 1 && $has_pass -eq 1 ]]; then
|
||||
echo "CSC_LINK=${WIN_CSC_LINK}" >> "$GITHUB_ENV"
|
||||
echo "CSC_KEY_PASSWORD=${WIN_CSC_KEY_PASSWORD}" >> "$GITHUB_ENV"
|
||||
echo "CSC_FOR_PULL_REQUEST=true" >> "$GITHUB_ENV"
|
||||
elif [[ $has_link -eq 1 || $has_pass -eq 1 ]]; then
|
||||
echo "::error::Windows signing secrets are partially configured. Set both WIN_CSC_LINK and WIN_CSC_KEY_PASSWORD."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Build desktop installers
|
||||
shell: bash
|
||||
env:
|
||||
NODE_OPTIONS: --max-old-space-size=16384
|
||||
run: |
|
||||
set -euo pipefail
|
||||
npm --prefix apps/desktop exec electron-builder -- \
|
||||
${{ matrix.build_args }} \
|
||||
--publish never \
|
||||
--config.extraMetadata.version="${DESKTOP_VERSION}" \
|
||||
--config.extraMetadata.desktopChannel="${DESKTOP_CHANNEL}" \
|
||||
'--config.artifactName=Hermes-${version}-${env.DESKTOP_CHANNEL}-${os}-${arch}.${ext}'
|
||||
|
||||
- name: Notarize and staple macOS DMG
|
||||
if: matrix.platform == 'mac' && needs.prepare.outputs.is_stable == 'true'
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
dmg_path="$(ls apps/desktop/release/*.dmg | head -n 1)"
|
||||
node apps/desktop/scripts/notarize-artifact.cjs "$dmg_path"
|
||||
|
||||
- name: Validate macOS notarization and Gatekeeper trust
|
||||
if: matrix.platform == 'mac' && needs.prepare.outputs.is_stable == 'true'
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
app_path="$(ls -d apps/desktop/release/mac*/Hermes.app | head -n 1)"
|
||||
dmg_path="$(ls apps/desktop/release/*.dmg | head -n 1)"
|
||||
xcrun stapler validate "$app_path"
|
||||
xcrun stapler validate "$dmg_path"
|
||||
spctl --assess --type execute --verbose=4 "$app_path"
|
||||
|
||||
- name: Generate desktop checksums
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
node <<'EOF'
|
||||
const crypto = require('node:crypto')
|
||||
const fs = require('node:fs')
|
||||
const path = require('node:path')
|
||||
|
||||
const releaseDir = path.resolve('apps/desktop/release')
|
||||
const platform = process.env.PLATFORM
|
||||
const extensions = platform === 'mac' ? ['.dmg', '.zip'] : ['.exe', '.msi']
|
||||
const files = fs
|
||||
.readdirSync(releaseDir)
|
||||
.filter(name => extensions.some(ext => name.endsWith(ext)))
|
||||
.sort()
|
||||
|
||||
if (!files.length) {
|
||||
throw new Error(`No release artifacts were produced for ${platform}`)
|
||||
}
|
||||
|
||||
const lines = files.map(name => {
|
||||
const full = path.join(releaseDir, name)
|
||||
const hash = crypto.createHash('sha256').update(fs.readFileSync(full)).digest('hex')
|
||||
return `${hash} ${name}`
|
||||
})
|
||||
fs.writeFileSync(path.join(releaseDir, `SHA256SUMS-${platform}.txt`), `${lines.join('\n')}\n`)
|
||||
EOF
|
||||
env:
|
||||
PLATFORM: ${{ matrix.platform }}
|
||||
|
||||
- name: Upload packaged desktop artifacts
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
|
||||
with:
|
||||
name: desktop-${{ matrix.platform }}
|
||||
path: |
|
||||
apps/desktop/release/*.dmg
|
||||
apps/desktop/release/*.zip
|
||||
apps/desktop/release/*.exe
|
||||
apps/desktop/release/*.msi
|
||||
apps/desktop/release/SHA256SUMS-${{ matrix.platform }}.txt
|
||||
if-no-files-found: error
|
||||
|
||||
publish:
|
||||
if: github.repository == 'NousResearch/hermes-agent'
|
||||
needs: [prepare, build]
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
CHANNEL: ${{ needs.prepare.outputs.channel }}
|
||||
RELEASE_NAME: ${{ needs.prepare.outputs.release_name }}
|
||||
RELEASE_TAG: ${{ needs.prepare.outputs.release_tag }}
|
||||
steps:
|
||||
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4
|
||||
with:
|
||||
pattern: desktop-*
|
||||
merge-multiple: true
|
||||
path: dist/desktop
|
||||
|
||||
- name: Publish desktop assets to GitHub release
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
shopt -s globstar nullglob
|
||||
|
||||
files=(
|
||||
dist/desktop/**/*.dmg
|
||||
dist/desktop/**/*.zip
|
||||
dist/desktop/**/*.exe
|
||||
dist/desktop/**/*.msi
|
||||
dist/desktop/**/SHA256SUMS-*.txt
|
||||
)
|
||||
|
||||
if (( ${#files[@]} == 0 )); then
|
||||
echo "No desktop artifacts were downloaded for publishing." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$CHANNEL" == "nightly" ]]; then
|
||||
git tag -f "$RELEASE_TAG" "$GITHUB_SHA"
|
||||
git push origin "refs/tags/$RELEASE_TAG" --force
|
||||
|
||||
notes="Automated nightly desktop build from main. This prerelease is replaced on each new run."
|
||||
|
||||
if gh release view "$RELEASE_TAG" >/dev/null 2>&1; then
|
||||
while IFS= read -r asset_name; do
|
||||
gh release delete-asset "$RELEASE_TAG" "$asset_name" --yes
|
||||
done < <(gh release view "$RELEASE_TAG" --json assets -q '.assets[].name')
|
||||
|
||||
gh release edit "$RELEASE_TAG" \
|
||||
--title "$RELEASE_NAME" \
|
||||
--prerelease \
|
||||
--notes "$notes"
|
||||
else
|
||||
gh release create "$RELEASE_TAG" \
|
||||
--target "$GITHUB_SHA" \
|
||||
--title "$RELEASE_NAME" \
|
||||
--notes "$notes" \
|
||||
--prerelease
|
||||
fi
|
||||
else
|
||||
if ! gh release view "$RELEASE_TAG" >/dev/null 2>&1; then
|
||||
notes="Automated desktop artifacts attached by desktop-release workflow."
|
||||
gh release create "$RELEASE_TAG" \
|
||||
--target "$GITHUB_SHA" \
|
||||
--title "$RELEASE_NAME" \
|
||||
--notes "$notes"
|
||||
fi
|
||||
fi
|
||||
|
||||
gh release upload "$RELEASE_TAG" "${files[@]}" --clobber
|
||||
.github/workflows/docker-publish.yml (211 changed lines, vendored)
@@ -3,248 +3,77 @@ name: Docker Build and Publish
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
paths:
|
||||
- '**/*.py'
|
||||
- 'pyproject.toml'
|
||||
- 'uv.lock'
|
||||
- 'Dockerfile'
|
||||
- 'docker/**'
|
||||
- '.github/workflows/docker-publish.yml'
|
||||
pull_request:
|
||||
branches: [main]
|
||||
release:
|
||||
types: [published]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
# Top-level concurrency: do NOT cancel in-flight builds when a new push lands.
|
||||
# Every commit deserves its own SHA-tagged image in the registry, and we guard
|
||||
# the :latest tag in a separate job below (with its own concurrency group) so
|
||||
# a slow run can't clobber :latest with older bits.
|
||||
concurrency:
|
||||
group: docker-${{ github.ref }}
|
||||
cancel-in-progress: false
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build-and-push:
|
||||
# Only run on the upstream repository, not on forks
|
||||
if: github.repository == 'NousResearch/hermes-agent'
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
outputs:
|
||||
pushed_sha_tag: ${{ steps.mark_pushed.outputs.pushed }}
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
# Fetch enough history to run `git merge-base --is-ancestor` in the
|
||||
# move-latest job. That job reuses this checkout via its own
|
||||
# actions/checkout call, but commits reachable from main up to ~1000
|
||||
# back are plenty for any realistic race window.
|
||||
fetch-depth: 1000
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
# Build amd64 only so we can `load` the image for smoke testing.
|
||||
# `load: true` cannot export a multi-arch manifest to the local daemon.
|
||||
# The multi-arch build follows on push to main / release.
|
||||
- name: Build image (amd64, smoke test)
|
||||
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6
|
||||
- name: Build image
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile
|
||||
load: true
|
||||
platforms: linux/amd64
|
||||
tags: nousresearch/hermes-agent:test
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
|
||||
- name: Test image starts
|
||||
run: |
|
||||
mkdir -p /tmp/hermes-test
|
||||
sudo chown -R 10000:10000 /tmp/hermes-test
|
||||
# The image runs as the hermes user (UID 10000). GitHub Actions
|
||||
# creates /tmp/hermes-test root-owned by default, which hermes
|
||||
# can't write to — chown it to match the in-container UID before
|
||||
# bind-mounting. Real users doing `docker run -v ~/.hermes:...`
|
||||
# with their own UID hit the same issue and have their own
|
||||
# remediations (HERMES_UID env var, or chown locally).
|
||||
docker run --rm \
|
||||
-v /tmp/hermes-test:/opt/data \
|
||||
--entrypoint /opt/hermes/docker/entrypoint.sh \
|
||||
nousresearch/hermes-agent:test --help
|
||||
|
||||
- name: Test dashboard subcommand
|
||||
run: |
|
||||
mkdir -p /tmp/hermes-test
|
||||
sudo chown -R 10000:10000 /tmp/hermes-test
|
||||
# Verify the dashboard subcommand is included in the Docker image.
|
||||
# This prevents regressions like #9153 where the dashboard command
|
||||
# was present in source but missing from the published image.
|
||||
docker run --rm \
|
||||
-v /tmp/hermes-test:/opt/data \
|
||||
--entrypoint /opt/hermes/docker/entrypoint.sh \
|
||||
nousresearch/hermes-agent:test dashboard --help
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
if: github.event_name == 'push' && github.ref == 'refs/heads/main' || github.event_name == 'release'
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
# Always push a per-commit SHA tag on main. This is race-free because
|
||||
# every commit has a unique SHA — concurrent runs can't clobber each
|
||||
# other here. We also embed the git SHA as an OCI label so the
|
||||
# move-latest job (below) can read it back off the registry's `:latest`.
|
||||
- name: Push multi-arch image with SHA tag (main branch)
|
||||
id: push_sha
|
||||
- name: Push image (main branch)
|
||||
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
|
||||
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: nousresearch/hermes-agent:sha-${{ github.sha }}
|
||||
labels: |
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
tags: |
|
||||
nousresearch/hermes-agent:latest
|
||||
nousresearch/hermes-agent:${{ github.sha }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
|
||||
- name: Mark SHA tag pushed
|
||||
id: mark_pushed
|
||||
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
|
||||
run: echo "pushed=true" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Push multi-arch image (release)
|
||||
- name: Push image (release)
|
||||
if: github.event_name == 'release'
|
||||
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: nousresearch/hermes-agent:${{ github.event.release.tag_name }}
|
||||
tags: |
|
||||
nousresearch/hermes-agent:latest
|
||||
nousresearch/hermes-agent:${{ github.event.release.tag_name }}
|
||||
nousresearch/hermes-agent:${{ github.sha }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
|
||||
# Second job: moves `:latest` to point at the SHA tag the first job pushed.
|
||||
#
|
||||
# Has its own concurrency group with `cancel-in-progress: true`, which
|
||||
# gives us the serialization we need: if a newer push arrives while an
|
||||
# older run is mid-way through this job, the older run is cancelled
|
||||
# before it can clobber `:latest`. Combined with the ancestor check
|
||||
# below, this means `:latest` only ever moves forward in git history.
|
||||
move-latest:
|
||||
if: |
|
||||
github.repository == 'NousResearch/hermes-agent'
|
||||
&& github.event_name == 'push'
|
||||
&& github.ref == 'refs/heads/main'
|
||||
&& needs.build-and-push.outputs.pushed_sha_tag == 'true'
|
||||
needs: build-and-push
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
concurrency:
|
||||
group: docker-move-latest-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
with:
|
||||
fetch-depth: 1000
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
# Read the git revision label off the current `:latest` manifest, then
|
||||
# use `git merge-base --is-ancestor` to check whether our commit is a
|
||||
# descendant of it. If `:latest` doesn't exist yet, or its label is
|
||||
# missing, we treat that as "safe to publish". If another run already
|
||||
# advanced `:latest` past us (or diverged), we skip and leave it alone.
|
||||
- name: Decide whether to move :latest
|
||||
id: latest_check
|
||||
run: |
|
||||
set -euo pipefail
|
||||
image=nousresearch/hermes-agent
|
||||
|
||||
# Pull the JSON for the linux/amd64 sub-manifest's config and extract
|
||||
# the OCI revision label with jq — Go template field access can't
|
||||
# handle dots in map keys, so using json+jq is the robust route.
|
||||
image_json=$(
|
||||
docker buildx imagetools inspect "${image}:latest" \
|
||||
--format '{{ json (index .Image "linux/amd64") }}' \
|
||||
2>/dev/null || true
|
||||
)
|
||||
|
||||
if [ -z "${image_json}" ]; then
|
||||
echo "No existing :latest (or inspect failed) — safe to publish."
|
||||
echo "push_latest=true" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
current_sha=$(
|
||||
printf '%s' "${image_json}" \
|
||||
| jq -r '.config.Labels."org.opencontainers.image.revision" // ""'
|
||||
)
|
||||
|
||||
if [ -z "${current_sha}" ]; then
|
||||
echo "Registry :latest has no revision label — safe to publish."
|
||||
echo "push_latest=true" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "Registry :latest is at ${current_sha}"
|
||||
echo "This run is at ${GITHUB_SHA}"
|
||||
|
||||
if [ "${current_sha}" = "${GITHUB_SHA}" ]; then
|
||||
echo ":latest already points at our SHA — nothing to do."
|
||||
echo "push_latest=false" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Make sure we have the :latest commit locally for merge-base.
|
||||
if ! git cat-file -e "${current_sha}^{commit}" 2>/dev/null; then
|
||||
git fetch --no-tags --prune origin \
|
||||
"+refs/heads/main:refs/remotes/origin/main" \
|
||||
|| true
|
||||
fi
|
||||
|
||||
if ! git cat-file -e "${current_sha}^{commit}" 2>/dev/null; then
|
||||
echo "Registry :latest points at an unknown commit (${current_sha}); refusing to overwrite."
|
||||
echo "push_latest=false" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Our SHA must be a descendant of the current :latest to be safe.
|
||||
if git merge-base --is-ancestor "${current_sha}" "${GITHUB_SHA}"; then
|
||||
echo "Our commit is a descendant of :latest — safe to advance."
|
||||
echo "push_latest=true" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "Another run advanced :latest past us (or diverged) — leaving it alone."
|
||||
echo "push_latest=false" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
# Retag the already-pushed SHA manifest as :latest. This is a registry-
|
||||
# side operation — no rebuild, no layer re-push — so it's quick and
|
||||
# atomic per-tag. The ancestor check above plus the cancel-in-progress
|
||||
# concurrency on this job together guarantee we only ever move :latest
|
||||
# forward in git history.
|
||||
- name: Move :latest to this SHA
|
||||
if: steps.latest_check.outputs.push_latest == 'true'
|
||||
run: |
|
||||
set -euo pipefail
|
||||
image=nousresearch/hermes-agent
|
||||
docker buildx imagetools create \
|
||||
--tag "${image}:latest" \
|
||||
"${image}:sha-${GITHUB_SHA}"
|
||||
|
||||
.github/workflows/docs-site-checks.yml (17 changed lines, vendored)
@@ -7,16 +7,13 @@ on:
- '.github/workflows/docs-site-checks.yml'
workflow_dispatch:

permissions:
contents: read

jobs:
docs-site-checks:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- uses: actions/checkout@v4

- uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
- uses: actions/setup-node@v4
with:
node-version: 20
cache: npm
@@ -26,18 +23,12 @@ jobs:
run: npm ci
working-directory: website

- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5
- uses: actions/setup-python@v5
with:
python-version: '3.11'

- name: Install ascii-guard
run: python -m pip install ascii-guard==2.3.0 pyyaml==6.0.3

- name: Extract skill metadata for dashboard
run: python3 website/scripts/extract-skills.py

- name: Regenerate per-skill docs pages + catalogs
run: python3 website/scripts/generate-skill-docs.py
run: python -m pip install ascii-guard

- name: Lint docs diagrams
run: npm run lint:diagrams
|
||||
|
||||
.github/workflows/lint.yml (151 changed lines, vendored)
@@ -1,151 +0,0 @@
|
||||
name: Lint (ruff + ty)
|
||||
|
||||
# Surface ruff and ty diagnostics as a diff vs the target branch.
|
||||
# This check is advisory only for now: it always exits zero and never blocks merge.
|
||||
# It posts a Markdown summary to the workflow run and, for pull requests,
|
||||
# comments the same summary on the PR.
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
paths-ignore:
|
||||
- "**/*.md"
|
||||
- "docs/**"
|
||||
- "website/**"
|
||||
pull_request:
|
||||
branches: [main]
|
||||
paths-ignore:
|
||||
- "**/*.md"
|
||||
- "docs/**"
|
||||
- "website/**"
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write # needed to post/update PR comments
|
||||
|
||||
concurrency:
|
||||
group: lint-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
lint-diff:
|
||||
name: ruff + ty diff
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
with:
|
||||
fetch-depth: 0 # need full history for merge-base + worktree
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5
|
||||
|
||||
- name: Install ruff + ty
|
||||
run: |
|
||||
uv tool install ruff
|
||||
uv tool install ty
|
||||
|
||||
- name: Determine base ref
|
||||
id: base
|
||||
run: |
|
||||
# For PRs, diff against the merge base with the target branch.
|
||||
# For pushes to main, diff against the previous commit on main.
|
||||
if [ "${{ github.event_name }}" = "pull_request" ]; then
|
||||
BASE_SHA=$(git merge-base "origin/${{ github.base_ref }}" HEAD)
|
||||
BASE_REF="origin/${{ github.base_ref }}"
|
||||
else
|
||||
BASE_SHA=$(git rev-parse HEAD~1 2>/dev/null || git rev-parse HEAD)
|
||||
BASE_REF="HEAD~1"
|
||||
fi
|
||||
echo "sha=${BASE_SHA}" >> "$GITHUB_OUTPUT"
|
||||
echo "ref=${BASE_REF}" >> "$GITHUB_OUTPUT"
|
||||
echo "Base SHA: ${BASE_SHA}"
|
||||
echo "Base ref: ${BASE_REF}"
|
||||
|
||||
- name: Run ruff + ty on HEAD
|
||||
run: |
|
||||
mkdir -p .lint-reports/head
|
||||
ruff check --output-format json --exit-zero \
|
||||
> .lint-reports/head/ruff.json || true
|
||||
ty check --output-format gitlab --exit-zero \
|
||||
> .lint-reports/head/ty.json || true
|
||||
echo "HEAD ruff: $(wc -c < .lint-reports/head/ruff.json) bytes"
|
||||
echo "HEAD ty: $(wc -c < .lint-reports/head/ty.json) bytes"
|
||||
|
||||
- name: Run ruff + ty on base (via git worktree)
|
||||
run: |
|
||||
mkdir -p .lint-reports/base
|
||||
# Use a worktree so we don't clobber the main checkout. If the base
|
||||
# SHA is identical to HEAD (e.g. first commit), skip and leave the
|
||||
# base reports empty — the diff script handles missing files.
|
||||
HEAD_SHA=$(git rev-parse HEAD)
|
||||
BASE_SHA="${{ steps.base.outputs.sha }}"
|
||||
if [ "$BASE_SHA" = "$HEAD_SHA" ]; then
|
||||
echo "Base SHA == HEAD SHA, skipping base scan."
|
||||
echo '[]' > .lint-reports/base/ruff.json
|
||||
echo '[]' > .lint-reports/base/ty.json
|
||||
else
|
||||
git worktree add --detach /tmp/lint-base "$BASE_SHA"
|
||||
(
|
||||
cd /tmp/lint-base
|
||||
ruff check --output-format json --exit-zero \
|
||||
> "$GITHUB_WORKSPACE/.lint-reports/base/ruff.json" || true
|
||||
ty check --output-format gitlab --exit-zero \
|
||||
> "$GITHUB_WORKSPACE/.lint-reports/base/ty.json" || true
|
||||
)
|
||||
git worktree remove --force /tmp/lint-base
|
||||
fi
|
||||
echo "base ruff: $(wc -c < .lint-reports/base/ruff.json) bytes"
|
||||
echo "base ty: $(wc -c < .lint-reports/base/ty.json) bytes"
|
||||
|
||||
- name: Generate diff summary
|
||||
run: |
|
||||
python scripts/lint_diff.py \
|
||||
--base-ruff .lint-reports/base/ruff.json \
|
||||
--head-ruff .lint-reports/head/ruff.json \
|
||||
--base-ty .lint-reports/base/ty.json \
|
||||
--head-ty .lint-reports/head/ty.json \
|
||||
--base-ref "${{ steps.base.outputs.ref }}" \
|
||||
--head-ref "${{ github.event_name == 'pull_request' && github.head_ref || github.ref_name }}" \
|
||||
--output .lint-reports/summary.md
|
||||
cat .lint-reports/summary.md >> "$GITHUB_STEP_SUMMARY"
|
||||
|
||||
- name: Upload reports as artifact
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
|
||||
with:
|
||||
name: lint-reports
|
||||
path: .lint-reports/
|
||||
retention-days: 14
|
||||
|
||||
- name: Post / update PR comment
|
||||
if: github.event_name == 'pull_request'
|
||||
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
|
||||
with:
|
||||
script: |
|
||||
const fs = require('fs');
|
||||
const body = fs.readFileSync('.lint-reports/summary.md', 'utf8');
|
||||
const marker = '<!-- lint-diff-summary -->';
|
||||
const fullBody = marker + '\n' + body;
|
||||
|
||||
const { data: comments } = await github.rest.issues.listComments({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
});
|
||||
const existing = comments.find(c => c.body && c.body.includes(marker));
|
||||
if (existing) {
|
||||
await github.rest.issues.updateComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
comment_id: existing.id,
|
||||
body: fullBody,
|
||||
});
|
||||
} else {
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: fullBody,
|
||||
});
|
||||
}
|
||||
.github/workflows/nix-lockfile-fix.yml (254 changed lines, vendored)
@@ -1,254 +0,0 @@
|
||||
name: Nix Lockfile Fix
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
paths:
|
||||
- 'ui-tui/package-lock.json'
|
||||
- 'ui-tui/package.json'
|
||||
- 'apps/dashboard/package-lock.json'
|
||||
- 'apps/dashboard/package.json'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
pr_number:
|
||||
description: 'PR number to fix (leave empty to run on the selected branch)'
|
||||
required: false
|
||||
type: string
|
||||
issue_comment:
|
||||
types: [edited]
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
|
||||
concurrency:
|
||||
group: nix-lockfile-fix-${{ github.event.issue.number || github.event.inputs.pr_number || github.ref }}
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
# ── Auto-fix on main ───────────────────────────────────────────────
|
||||
# Fires when a push to main touches package.json or package-lock.json
|
||||
# in ui-tui/ or apps/dashboard/. Runs fix-lockfiles and pushes the hash
|
||||
# update commit directly to main so Nix builds never stay broken.
|
||||
#
|
||||
# Safety invariants:
|
||||
# 1. The fix commit only touches nix/*.nix files, which are NOT in
|
||||
# the paths filter above, so this cannot re-trigger itself.
|
||||
# 2. An explicit file-whitelist check before commit aborts if
|
||||
# fix-lockfiles ever modifies unexpected files.
|
||||
# 3. Job-level concurrency with cancel-in-progress: true ensures
|
||||
# back-to-back pushes collapse to the newest; ref: main checkout
|
||||
# always operates on the latest branch state.
|
||||
# 4. Uses a GitHub App token (not GITHUB_TOKEN) so the fix commit
|
||||
# triggers downstream nix.yml verification.
|
||||
auto-fix-main:
|
||||
if: github.event_name == 'push'
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 25
|
||||
concurrency:
|
||||
group: auto-fix-main
|
||||
cancel-in-progress: true
|
||||
steps:
|
||||
- name: Generate GitHub App token
|
||||
id: app-token
|
||||
uses: actions/create-github-app-token@7bfa3a4717ef143a604ee0a99d859b8886a96d00 # v1.9.3
|
||||
with:
|
||||
app-id: ${{ secrets.APP_ID }}
|
||||
private-key: ${{ secrets.APP_PRIVATE_KEY }}
|
||||
|
||||
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
with:
|
||||
ref: main
|
||||
token: ${{ steps.app-token.outputs.token }}
|
||||
|
||||
- uses: ./.github/actions/nix-setup
|
||||
with:
|
||||
cachix-auth-token: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||
|
||||
- name: Apply lockfile hashes
|
||||
id: apply
|
||||
run: nix run .#fix-lockfiles -- --apply
|
||||
|
||||
- name: Commit & push
|
||||
if: steps.apply.outputs.changed == 'true'
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
# Ensure only nix files were modified — prevents accidental
|
||||
# self-triggering if fix-lockfiles ever touches package files.
|
||||
unexpected="$(git diff --name-only | grep -Ev '^nix/(tui|web)\.nix$' || true)"
|
||||
if [ -n "$unexpected" ]; then
|
||||
echo "::error::Unexpected modified files: $unexpected"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Record the base SHA before committing — used to detect package
|
||||
# file changes if we need to rebase after a non-fast-forward push.
|
||||
BASE_SHA="$(git rev-parse HEAD)"
|
||||
|
||||
git config user.name 'github-actions[bot]'
|
||||
git config user.email '41898282+github-actions[bot]@users.noreply.github.com'
|
||||
git add nix/tui.nix nix/web.nix
|
||||
git commit -m "fix(nix): auto-refresh npm lockfile hashes" \
|
||||
-m "Source: $GITHUB_SHA" \
|
||||
-m "Run: $GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID"
|
||||
|
||||
# Retry push with rebase in case main advanced with an unrelated
|
||||
# commit during the nix build. Without this, a non-fast-forward
|
||||
# rejection silently loses the fix. If package files changed during
|
||||
# the rebase, abort — a fresh auto-fix run will handle the new state.
|
||||
for attempt in 1 2 3; do
|
||||
if git push origin HEAD:main; then
|
||||
exit 0
|
||||
fi
|
||||
echo "::warning::Push attempt $attempt failed (non-fast-forward?), rebasing…"
|
||||
git fetch origin main
|
||||
|
||||
# If package files changed between our base and the new main,
|
||||
# our computed hashes are stale. Abort and let the next triggered
|
||||
# run recompute from the correct package-lock state.
|
||||
pkg_changed="$(git diff --name-only "$BASE_SHA"..origin/main -- \
|
||||
'ui-tui/package-lock.json' 'ui-tui/package.json' \
|
||||
'apps/dashboard/package-lock.json' 'apps/dashboard/package.json' || true)"
|
||||
if [ -n "$pkg_changed" ]; then
|
||||
echo "::warning::Package files changed since hash computation — aborting; a fresh run will recompute"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
git rebase origin/main
|
||||
done
|
||||
echo "::error::Failed to push after 3 rebase attempts"
|
||||
exit 1
|
||||
|
||||
# ── PR fix (manual / checkbox) ─────────────────────────────────────
|
||||
# Existing behavior: run on manual dispatch OR when a task-list
|
||||
# checkbox in the sticky lockfile-check comment flips from [ ] to [x].
|
||||
fix:
|
||||
if: |
|
||||
github.event_name == 'workflow_dispatch' ||
|
||||
(github.event_name == 'issue_comment'
|
||||
&& github.event.issue.pull_request != null
|
||||
&& contains(github.event.comment.body, '[x] **Apply lockfile fix**')
|
||||
&& !contains(github.event.changes.body.from, '[x] **Apply lockfile fix**'))
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 25
|
||||
steps:
|
||||
- name: Authorize & resolve PR
|
||||
id: resolve
|
||||
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
|
||||
with:
|
||||
script: |
|
||||
// 1. Verify the actor has write access — applies to both checkbox
|
||||
// clicks and manual dispatch.
|
||||
const { data: perm } =
|
||||
await github.rest.repos.getCollaboratorPermissionLevel({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
username: context.actor,
|
||||
});
|
||||
if (!['admin', 'write', 'maintain'].includes(perm.permission)) {
|
||||
core.setFailed(
|
||||
`${context.actor} lacks write access (has: ${perm.permission})`
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
// 2. Resolve which ref to check out.
|
||||
let prNumber = '';
|
||||
if (context.eventName === 'issue_comment') {
|
||||
prNumber = String(context.payload.issue.number);
|
||||
} else if (context.eventName === 'workflow_dispatch') {
|
||||
prNumber = context.payload.inputs.pr_number || '';
|
||||
}
|
||||
|
||||
if (!prNumber) {
|
||||
core.setOutput('ref', context.ref.replace(/^refs\/heads\//, ''));
|
||||
core.setOutput('repo', context.repo.repo);
|
||||
core.setOutput('owner', context.repo.owner);
|
||||
core.setOutput('pr', '');
|
||||
return;
|
||||
}
|
||||
|
||||
const { data: pr } = await github.rest.pulls.get({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
pull_number: Number(prNumber),
|
||||
});
|
||||
core.setOutput('ref', pr.head.ref);
|
||||
core.setOutput('repo', pr.head.repo.name);
|
||||
core.setOutput('owner', pr.head.repo.owner.login);
|
||||
core.setOutput('pr', String(pr.number));
|
||||
|
||||
# Wipe the sticky lockfile-check comment to a "running" state as soon
|
||||
# as the job is authorized, so the user sees their click was picked up
|
||||
# before the ~minute of nix build work.
|
||||
- name: Mark sticky as running
|
||||
if: steps.resolve.outputs.pr != ''
|
||||
uses: marocchino/sticky-pull-request-comment@52423e01640425a022ef5fd42c6fb5f633a02728 # v2.9.1
|
||||
with:
|
||||
header: nix-lockfile-check
|
||||
number: ${{ steps.resolve.outputs.pr }}
|
||||
message: |
|
||||
### 🔄 Applying lockfile fix…
|
||||
|
||||
Triggered by @${{ github.actor }} — [workflow run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}).
|
||||
|
||||
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
with:
|
||||
repository: ${{ steps.resolve.outputs.owner }}/${{ steps.resolve.outputs.repo }}
|
||||
ref: ${{ steps.resolve.outputs.ref }}
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: ./.github/actions/nix-setup
|
||||
with:
|
||||
cachix-auth-token: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||
|
||||
- name: Apply lockfile hashes
|
||||
id: apply
|
||||
run: nix run .#fix-lockfiles
|
||||
|
||||
- name: Commit & push
|
||||
if: steps.apply.outputs.changed == 'true'
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
git config user.name 'github-actions[bot]'
|
||||
git config user.email '41898282+github-actions[bot]@users.noreply.github.com'
|
||||
git add nix/tui.nix nix/web.nix
|
||||
git commit -m "fix(nix): refresh npm lockfile hashes"
|
||||
git push
|
||||
|
||||
- name: Update sticky (applied)
|
||||
if: steps.apply.outputs.changed == 'true' && steps.resolve.outputs.pr != ''
|
||||
uses: marocchino/sticky-pull-request-comment@52423e01640425a022ef5fd42c6fb5f633a02728 # v2.9.1
|
||||
with:
|
||||
header: nix-lockfile-check
|
||||
number: ${{ steps.resolve.outputs.pr }}
|
||||
message: |
|
||||
### ✅ Lockfile fix applied
|
||||
|
||||
Pushed a commit refreshing the npm lockfile hashes — [workflow run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}).
|
||||
|
||||
- name: Update sticky (already current)
|
||||
if: steps.apply.outputs.changed == 'false' && steps.resolve.outputs.pr != ''
|
||||
uses: marocchino/sticky-pull-request-comment@52423e01640425a022ef5fd42c6fb5f633a02728 # v2.9.1
|
||||
with:
|
||||
header: nix-lockfile-check
|
||||
number: ${{ steps.resolve.outputs.pr }}
|
||||
message: |
|
||||
### ✅ Lockfile hashes already current
|
||||
|
||||
Nothing to commit — [workflow run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}).
|
||||
|
||||
- name: Update sticky (failed)
|
||||
if: failure() && steps.resolve.outputs.pr != ''
|
||||
uses: marocchino/sticky-pull-request-comment@52423e01640425a022ef5fd42c6fb5f633a02728 # v2.9.1
|
||||
with:
|
||||
header: nix-lockfile-check
|
||||
number: ${{ steps.resolve.outputs.pr }}
|
||||
message: |
|
||||
### ❌ Lockfile fix failed
|
||||
|
||||
See the [workflow run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for logs.
|
||||
101 .github/workflows/nix.yml (vendored)
@@ -4,10 +4,15 @@ on:
|
||||
push:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
paths:
|
||||
- 'flake.nix'
|
||||
- 'flake.lock'
|
||||
- 'nix/**'
|
||||
- 'pyproject.toml'
|
||||
- 'uv.lock'
|
||||
- 'hermes_cli/**'
|
||||
- 'run_agent.py'
|
||||
- 'acp_adapter/**'
|
||||
|
||||
concurrency:
|
||||
group: nix-${{ github.ref }}
|
||||
@@ -21,97 +26,15 @@ jobs:
|
||||
runs-on: ${{ matrix.os }}
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
- uses: ./.github/actions/nix-setup
|
||||
with:
|
||||
cachix-auth-token: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||
|
||||
- name: Resolve head SHA
|
||||
if: github.event_name == 'pull_request'
|
||||
id: sha
|
||||
shell: bash
|
||||
run: |
|
||||
FULL="${{ github.event.pull_request.head.sha || github.sha }}"
|
||||
echo "full=$FULL" >> "$GITHUB_OUTPUT"
|
||||
echo "short=${FULL:0:7}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
- uses: DeterminateSystems/nix-installer-action@main
|
||||
- uses: DeterminateSystems/magic-nix-cache-action@main
|
||||
- name: Check flake
|
||||
id: flake
|
||||
if: runner.os == 'Linux'
|
||||
continue-on-error: true
|
||||
run: nix flake check --print-build-logs
|
||||
|
||||
- name: Build package
|
||||
id: build
|
||||
if: runner.os == 'Linux'
|
||||
continue-on-error: true
|
||||
run: nix build --print-build-logs
|
||||
|
||||
# When the real Nix build fails, run a targeted diagnostic to see if
|
||||
# the failure is specifically a stale npm lockfile hash in one of the
|
||||
# known npm subpackages (tui / web). This avoids surfacing a generic
|
||||
# "build failed" message when the fix is a single known command.
|
||||
- name: Diagnose npm lockfile hashes
|
||||
id: hash_check
|
||||
if: (steps.flake.outcome == 'failure' || steps.build.outcome == 'failure') && runner.os == 'Linux'
|
||||
continue-on-error: true
|
||||
env:
|
||||
LINK_SHA: ${{ steps.sha.outputs.full }}
|
||||
run: nix run .#fix-lockfiles -- --check
|
||||
|
||||
# If fix-lockfiles itself crashes (infrastructure blip, cache throttle,
|
||||
# etc.) it won't set stale=true/false. Treat that as a distinct failure
|
||||
# mode rather than silently ignoring it.
|
||||
- name: Fail if hash check crashed without reporting
|
||||
if: steps.hash_check.outcome == 'failure' && steps.hash_check.outputs.stale != 'true' && steps.hash_check.outputs.stale != 'false'
|
||||
run: |
|
||||
echo "::error::fix-lockfiles exited without reporting stale status — likely an infrastructure or script failure"
|
||||
exit 1
|
||||
|
||||
- name: Post sticky PR comment (stale hashes)
|
||||
if: steps.hash_check.outputs.stale == 'true' && github.event_name == 'pull_request'
|
||||
uses: marocchino/sticky-pull-request-comment@52423e01640425a022ef5fd42c6fb5f633a02728 # v2.9.1
|
||||
with:
|
||||
header: nix-lockfile-check
|
||||
message: |
|
||||
### ⚠️ npm lockfile hash out of date
|
||||
|
||||
Checked against commit [`${{ steps.sha.outputs.short }}`](${{ github.server_url }}/${{ github.repository }}/commit/${{ steps.sha.outputs.full }}) (PR head at check time).
|
||||
|
||||
The `hash = "sha256-..."` line in these nix files no longer matches the committed `package-lock.json`:
|
||||
|
||||
${{ steps.hash_check.outputs.report }}
|
||||
|
||||
#### Apply the fix
|
||||
|
||||
- [ ] **Apply lockfile fix** — tick to push a commit with the correct hashes to this PR branch
|
||||
- Or [run the Nix Lockfile Fix workflow](${{ github.server_url }}/${{ github.repository }}/actions/workflows/nix-lockfile-fix.yml) manually (pass PR `#${{ github.event.pull_request.number }}`)
|
||||
- Or locally: `nix run .#fix-lockfiles` and commit the diff
|
||||
|
||||
# Clear the sticky comment when either the build passed outright (no
|
||||
# hash check needed) or the hash check explicitly returned stale=false
|
||||
# (build failed for a non-hash reason).
|
||||
- name: Clear sticky PR comment (resolved)
|
||||
if: |
|
||||
github.event_name == 'pull_request' &&
|
||||
runner.os == 'Linux' &&
|
||||
(steps.hash_check.outputs.stale == 'false' ||
|
||||
(steps.flake.outcome == 'success' && steps.build.outcome == 'success'))
|
||||
uses: marocchino/sticky-pull-request-comment@52423e01640425a022ef5fd42c6fb5f633a02728 # v2.9.1
|
||||
with:
|
||||
header: nix-lockfile-check
|
||||
delete: true
|
||||
|
||||
- name: Final fail if build or flake failed
|
||||
if: steps.flake.outcome == 'failure' || steps.build.outcome == 'failure'
|
||||
run: |
|
||||
if [ "${{ steps.hash_check.outputs.stale }}" == "true" ]; then
|
||||
echo "::error::Nix build failed due to stale npm lockfile hash. Run: nix run .#fix-lockfiles"
|
||||
else
|
||||
echo "::error::Nix build/flake check failed. See logs above."
|
||||
fi
|
||||
exit 1
|
||||
|
||||
- name: Evaluate flake (macOS)
|
||||
if: runner.os == 'macOS'
|
||||
run: nix flake show --json > /dev/null
|
||||
|
||||
67 .github/workflows/osv-scanner.yml (vendored)
@@ -1,67 +0,0 @@
|
||||
name: OSV-Scanner
|
||||
|
||||
# Scans lockfiles (uv.lock, package-lock.json) against the OSV vulnerability
|
||||
# database. Runs on every PR that touches a lockfile and on a weekly schedule
|
||||
# against main.
|
||||
#
|
||||
# This is detection-only — OSV-Scanner does NOT open PRs or modify pins.
|
||||
# It reports known CVEs in currently-pinned dependency versions so we can
|
||||
# decide when and how to patch on our own schedule. Our pinning strategy
|
||||
# (full SHA / exact version) is preserved; only the notification signal
|
||||
# is added.
|
||||
#
|
||||
# Complements the existing supply-chain-audit.yml workflow (which scans
|
||||
# for malicious code patterns in PR diffs) by covering the orthogonal
|
||||
# "currently-pinned dep became known-vulnerable" case.
|
||||
#
|
||||
# Uses Google's officially-recommended reusable workflow, pinned by SHA.
|
||||
# Findings land in the repo's Security tab (Code Scanning > OSV-Scanner).
|
||||
# fail-on-vuln is disabled so the job does not block merges on pre-existing
|
||||
# vulnerabilities in pinned deps that we may need to patch deliberately.
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches: [main]
|
||||
paths:
|
||||
- 'uv.lock'
|
||||
- 'pyproject.toml'
|
||||
- 'package.json'
|
||||
- 'package-lock.json'
|
||||
- 'ui-tui/package.json'
|
||||
- 'ui-tui/package-lock.json'
|
||||
- 'website/package.json'
|
||||
- 'website/package-lock.json'
|
||||
- '.github/workflows/osv-scanner.yml'
|
||||
push:
|
||||
branches: [main]
|
||||
paths:
|
||||
- 'uv.lock'
|
||||
- 'pyproject.toml'
|
||||
- 'package.json'
|
||||
- 'package-lock.json'
|
||||
- 'ui-tui/package-lock.json'
|
||||
- 'website/package-lock.json'
|
||||
schedule:
|
||||
# Weekly scan against main — catches CVEs published after merge for
|
||||
# deps that haven't changed since.
|
||||
- cron: '0 9 * * 1'
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
# Required by the reusable workflow to upload SARIF to the Security tab.
|
||||
actions: read
|
||||
contents: read
|
||||
security-events: write
|
||||
|
||||
jobs:
|
||||
scan:
|
||||
name: Scan lockfiles
|
||||
uses: google/osv-scanner-action/.github/workflows/osv-scanner-reusable.yml@c51854704019a247608d928f370c98740469d4b5 # v2.3.5
|
||||
with:
|
||||
# Scan explicit lockfiles rather than recursing, so we only look at
|
||||
# the three sources of truth and skip vendored / test / worktree dirs.
|
||||
scan-args: |-
|
||||
--lockfile=uv.lock
|
||||
--lockfile=ui-tui/package-lock.json
|
||||
--lockfile=website/package-lock.json
|
||||
fail-on-vuln: false
|
||||
101 .github/workflows/skills-index.yml (vendored)
@@ -1,101 +0,0 @@
|
||||
name: Build Skills Index
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Run twice daily: 6 AM and 6 PM UTC
|
||||
- cron: '0 6,18 * * *'
|
||||
workflow_dispatch: # Manual trigger
|
||||
push:
|
||||
branches: [main]
|
||||
paths:
|
||||
- 'scripts/build_skills_index.py'
|
||||
- '.github/workflows/skills-index.yml'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build-index:
|
||||
# Only run on the upstream repository, not on forks
|
||||
if: github.repository == 'NousResearch/hermes-agent'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
|
||||
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
|
||||
- name: Install dependencies
|
||||
run: pip install httpx==0.28.1 pyyaml==6.0.2
|
||||
|
||||
- name: Build skills index
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: python scripts/build_skills_index.py
|
||||
|
||||
- name: Upload index artifact
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
|
||||
with:
|
||||
name: skills-index
|
||||
path: website/static/api/skills-index.json
|
||||
retention-days: 7
|
||||
|
||||
deploy-with-index:
|
||||
needs: build-index
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
pages: write
|
||||
id-token: write
|
||||
environment:
|
||||
name: github-pages
|
||||
url: ${{ steps.deploy.outputs.page_url }}
|
||||
# Only deploy on schedule or manual trigger (not on every push to the script)
|
||||
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
|
||||
steps:
|
||||
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
|
||||
- uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4
|
||||
with:
|
||||
name: skills-index
|
||||
path: website/static/api/
|
||||
|
||||
- uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
|
||||
with:
|
||||
node-version: 20
|
||||
cache: npm
|
||||
cache-dependency-path: website/package-lock.json
|
||||
|
||||
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
|
||||
- name: Install PyYAML for skill extraction
|
||||
run: pip install pyyaml==6.0.2
|
||||
|
||||
- name: Extract skill metadata for dashboard
|
||||
run: python3 website/scripts/extract-skills.py
|
||||
|
||||
- name: Install dependencies
|
||||
run: npm ci
|
||||
working-directory: website
|
||||
|
||||
- name: Build Docusaurus
|
||||
run: npm run build
|
||||
working-directory: website
|
||||
|
||||
- name: Stage deployment
|
||||
run: |
|
||||
mkdir -p _site/docs
|
||||
cp -r landingpage/* _site/
|
||||
cp -r website/build/* _site/docs/
|
||||
echo "hermes-agent.nousresearch.com" > _site/CNAME
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-pages-artifact@56afc609e74202658d3ffba0e8f6dda462b719fa # v3
|
||||
with:
|
||||
path: _site
|
||||
|
||||
- name: Deploy to GitHub Pages
|
||||
id: deploy
|
||||
uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # v4
|
||||
131 .github/workflows/supply-chain-audit.yml (vendored)
@@ -3,39 +3,22 @@ name: Supply Chain Audit
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
paths:
|
||||
- '**/*.py'
|
||||
- '**/*.pth'
|
||||
- '**/setup.py'
|
||||
- '**/setup.cfg'
|
||||
- '**/sitecustomize.py'
|
||||
- '**/usercustomize.py'
|
||||
- '**/__init__.pth'
|
||||
|
||||
permissions:
|
||||
pull-requests: write
|
||||
contents: read
|
||||
|
||||
# Narrow, high-signal scanner. Only fires on critical indicators of supply
|
||||
# chain attacks (e.g. the litellm-style payloads). Low-signal heuristics
|
||||
# (plain base64, plain exec/eval, dependency/Dockerfile/workflow edits,
|
||||
# Actions version unpinning, outbound POST/PUT) were intentionally
|
||||
# removed — they fired on nearly every PR and trained reviewers to ignore
|
||||
# the scanner. Keep this file's checks ruthlessly narrow: if you find
|
||||
# yourself adding WARNING-tier patterns here again, make a separate
|
||||
# advisory-only workflow instead.
|
||||
|
||||
jobs:
|
||||
scan:
|
||||
name: Scan PR for critical supply chain risks
|
||||
name: Scan PR for supply chain risks
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Scan diff for critical patterns
|
||||
- name: Scan diff for suspicious patterns
|
||||
id: scan
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
@@ -45,19 +28,19 @@ jobs:
|
||||
BASE="${{ github.event.pull_request.base.sha }}"
|
||||
HEAD="${{ github.event.pull_request.head.sha }}"
|
||||
|
||||
# Added lines only, excluding lockfiles.
|
||||
# Get the full diff (added lines only)
|
||||
DIFF=$(git diff "$BASE".."$HEAD" -- . ':!uv.lock' ':!*.lock' ':!package-lock.json' ':!yarn.lock' || true)
|
||||
|
||||
FINDINGS=""
|
||||
CRITICAL=false
|
||||
|
||||
# --- .pth files (auto-execute on Python startup) ---
|
||||
# The exact mechanism used in the litellm supply chain attack:
|
||||
# https://github.com/BerriAI/litellm/issues/24512
|
||||
PTH_FILES=$(git diff --name-only "$BASE".."$HEAD" | grep '\.pth$' || true)
|
||||
if [ -n "$PTH_FILES" ]; then
|
||||
CRITICAL=true
|
||||
FINDINGS="${FINDINGS}
|
||||
### 🚨 CRITICAL: .pth file added or modified
|
||||
Python \`.pth\` files in \`site-packages/\` execute automatically when the interpreter starts — no import required.
|
||||
Python \`.pth\` files in \`site-packages/\` execute automatically when the interpreter starts — no import required. This is the exact mechanism used in the [litellm supply chain attack](https://github.com/BerriAI/litellm/issues/24512).
|
||||
|
||||
**Files:**
|
||||
\`\`\`
|
||||
@@ -66,12 +49,13 @@ jobs:
|
||||
"
|
||||
fi
|
||||
|
||||
# --- base64 decode + exec/eval on the same line (the litellm attack pattern) ---
|
||||
# --- base64 + exec/eval combo (the litellm attack pattern) ---
|
||||
B64_EXEC_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -iE 'base64\.(b64decode|decodebytes|urlsafe_b64decode)' | grep -iE 'exec\(|eval\(' | head -10 || true)
|
||||
if [ -n "$B64_EXEC_HITS" ]; then
|
||||
CRITICAL=true
|
||||
FINDINGS="${FINDINGS}
|
||||
### 🚨 CRITICAL: base64 decode + exec/eval combo
|
||||
Base64-decoded strings passed directly to exec/eval — the signature of hidden credential-stealing payloads.
|
||||
This is the exact pattern used in the [litellm supply chain attack](https://github.com/BerriAI/litellm/issues/24512) — base64-decoded strings passed to exec/eval to hide credential-stealing payloads.
|
||||
|
||||
**Matches:**
|
||||
\`\`\`
|
||||
@@ -80,12 +64,41 @@ jobs:
|
||||
"
|
||||
fi
|
||||
|
||||
# --- subprocess with encoded/obfuscated command argument ---
|
||||
PROC_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -E 'subprocess\.(Popen|call|run)\s*\(' | grep -iE 'base64|\\x[0-9a-f]{2}|chr\(' | head -10 || true)
|
||||
# --- base64 decode/encode (alone — legitimate uses exist) ---
|
||||
B64_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -iE 'base64\.(b64decode|b64encode|decodebytes|encodebytes|urlsafe_b64decode)|atob\(|btoa\(|Buffer\.from\(.*base64' | head -20 || true)
|
||||
if [ -n "$B64_HITS" ]; then
|
||||
FINDINGS="${FINDINGS}
|
||||
### ⚠️ WARNING: base64 encoding/decoding detected
|
||||
Base64 has legitimate uses (images, JWT, etc.) but is also commonly used to obfuscate malicious payloads. Verify the usage is appropriate.
|
||||
|
||||
**Matches (first 20):**
|
||||
\`\`\`
|
||||
${B64_HITS}
|
||||
\`\`\`
|
||||
"
|
||||
fi
|
||||
|
||||
# --- exec/eval with string arguments ---
|
||||
EXEC_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -E '(exec|eval)\s*\(' | grep -v '^\+\s*#' | grep -v 'test_\|mock\|assert\|# ' | head -20 || true)
|
||||
if [ -n "$EXEC_HITS" ]; then
|
||||
FINDINGS="${FINDINGS}
|
||||
### ⚠️ WARNING: exec() or eval() usage
|
||||
Dynamic code execution can hide malicious behavior, especially when combined with base64 or network fetches.
|
||||
|
||||
**Matches (first 20):**
|
||||
\`\`\`
|
||||
${EXEC_HITS}
|
||||
\`\`\`
|
||||
"
|
||||
fi
|
||||
|
||||
# --- subprocess with encoded/obfuscated commands ---
|
||||
PROC_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -E 'subprocess\.(Popen|call|run)\s*\(' | grep -iE 'base64|decode|encode|\\x|chr\(' | head -10 || true)
|
||||
if [ -n "$PROC_HITS" ]; then
|
||||
CRITICAL=true
|
||||
FINDINGS="${FINDINGS}
|
||||
### 🚨 CRITICAL: subprocess with encoded/obfuscated command
|
||||
Subprocess calls whose command strings are base64- or hex-encoded are a strong indicator of payload execution.
|
||||
Subprocess calls with encoded arguments are a strong indicator of payload execution.
|
||||
|
||||
**Matches:**
|
||||
\`\`\`
|
||||
@@ -94,12 +107,25 @@ jobs:
|
||||
"
|
||||
fi
|
||||
|
||||
# --- Install-hook files (setup.py/sitecustomize/usercustomize/__init__.pth) ---
|
||||
# These execute during pip install or interpreter startup.
|
||||
SETUP_HITS=$(git diff --name-only "$BASE".."$HEAD" | grep -E '(^|/)(setup\.py|setup\.cfg|sitecustomize\.py|usercustomize\.py|__init__\.pth)$' || true)
|
||||
# --- Network calls to non-standard domains ---
|
||||
EXFIL_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -iE 'requests\.(post|put)\(|httpx\.(post|put)\(|urllib\.request\.urlopen' | grep -v '^\+\s*#' | grep -v 'test_\|mock\|assert' | head -10 || true)
|
||||
if [ -n "$EXFIL_HITS" ]; then
|
||||
FINDINGS="${FINDINGS}
|
||||
### ⚠️ WARNING: Outbound network calls (POST/PUT)
|
||||
Outbound POST/PUT requests in new code could be data exfiltration. Verify the destination URLs are legitimate.
|
||||
|
||||
**Matches (first 10):**
|
||||
\`\`\`
|
||||
${EXFIL_HITS}
|
||||
\`\`\`
|
||||
"
|
||||
fi
|
||||
|
||||
# --- setup.py / setup.cfg install hooks ---
|
||||
SETUP_HITS=$(git diff --name-only "$BASE".."$HEAD" | grep -E '(setup\.py|setup\.cfg|__init__\.pth|sitecustomize\.py|usercustomize\.py)$' || true)
|
||||
if [ -n "$SETUP_HITS" ]; then
|
||||
FINDINGS="${FINDINGS}
|
||||
### 🚨 CRITICAL: Install-hook file added or modified
|
||||
### ⚠️ WARNING: Install hook files modified
|
||||
These files can execute code during package installation or interpreter startup.
|
||||
|
||||
**Files:**
|
||||
@@ -109,31 +135,58 @@ jobs:
|
||||
"
|
||||
fi
|
||||
|
||||
# --- Compile/marshal/pickle (code object injection) ---
|
||||
MARSHAL_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -iE 'marshal\.loads|pickle\.loads|compile\(' | grep -v '^\+\s*#' | grep -v 'test_\|re\.compile\|ast\.compile' | head -10 || true)
|
||||
if [ -n "$MARSHAL_HITS" ]; then
|
||||
FINDINGS="${FINDINGS}
|
||||
### ⚠️ WARNING: marshal/pickle/compile usage
|
||||
These can deserialize or construct executable code objects.
|
||||
|
||||
**Matches:**
|
||||
\`\`\`
|
||||
${MARSHAL_HITS}
|
||||
\`\`\`
|
||||
"
|
||||
fi
|
||||
|
||||
# --- Output results ---
|
||||
if [ -n "$FINDINGS" ]; then
|
||||
echo "found=true" >> "$GITHUB_OUTPUT"
|
||||
if [ "$CRITICAL" = true ]; then
|
||||
echo "critical=true" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "critical=false" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
# Write findings to a file (multiline env vars are fragile)
|
||||
echo "$FINDINGS" > /tmp/findings.md
|
||||
else
|
||||
echo "found=false" >> "$GITHUB_OUTPUT"
|
||||
echo "critical=false" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
- name: Post critical finding comment
|
||||
- name: Post warning comment
|
||||
if: steps.scan.outputs.found == 'true'
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
BODY="## 🚨 CRITICAL Supply Chain Risk Detected
|
||||
SEVERITY="⚠️ Supply Chain Risk Detected"
|
||||
if [ "${{ steps.scan.outputs.critical }}" = "true" ]; then
|
||||
SEVERITY="🚨 CRITICAL Supply Chain Risk Detected"
|
||||
fi
|
||||
|
||||
This PR contains a pattern that has been used in real supply chain attacks. A maintainer must review the flagged code carefully before merging.
|
||||
BODY="## ${SEVERITY}
|
||||
|
||||
This PR contains patterns commonly associated with supply chain attacks. This does **not** mean the PR is malicious — but these patterns require careful human review before merging.
|
||||
|
||||
$(cat /tmp/findings.md)
|
||||
|
||||
---
|
||||
*Scanner only fires on high-signal indicators: .pth files, base64+exec/eval combos, subprocess with encoded commands, or install-hook files. Low-signal warnings were removed intentionally — if you're seeing this comment, the finding is worth inspecting.*"
|
||||
*Automated scan triggered by [supply-chain-audit](/.github/workflows/supply-chain-audit.yml). If this is a false positive, a maintainer can approve after manual review.*"
|
||||
|
||||
gh pr comment "${{ github.event.pull_request.number }}" --body "$BODY" || echo "::warning::Could not post PR comment (expected for fork PRs — GITHUB_TOKEN is read-only)"
|
||||
gh pr comment "${{ github.event.pull_request.number }}" --body "$BODY"
|
||||
|
||||
- name: Fail on critical findings
|
||||
if: steps.scan.outputs.found == 'true'
|
||||
if: steps.scan.outputs.critical == 'true'
|
||||
run: |
|
||||
echo "::error::CRITICAL supply chain risk patterns detected in this PR. See the PR comment for details."
|
||||
exit 1
|
||||
|
||||
22 .github/workflows/tests.yml (vendored)
@@ -3,17 +3,8 @@ name: Tests
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
paths-ignore:
|
||||
- '**/*.md'
|
||||
- 'docs/**'
|
||||
pull_request:
|
||||
branches: [main]
|
||||
paths-ignore:
|
||||
- '**/*.md'
|
||||
- 'docs/**'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
# Cancel in-progress runs for the same PR/branch
|
||||
concurrency:
|
||||
@@ -23,16 +14,13 @@ concurrency:
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
|
||||
- name: Install system dependencies
|
||||
run: sudo apt-get update && sudo apt-get install -y ripgrep
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5
|
||||
uses: astral-sh/setup-uv@v5
|
||||
|
||||
- name: Set up Python 3.11
|
||||
run: uv python install 3.11
|
||||
@@ -58,10 +46,10 @@ jobs:
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5
|
||||
uses: astral-sh/setup-uv@v5
|
||||
|
||||
- name: Set up Python 3.11
|
||||
run: uv python install 3.11
|
||||
|
||||
25 .gitignore (vendored)
@@ -1,4 +1,3 @@
|
||||
.DS_Store
|
||||
/venv/
|
||||
/_pycache/
|
||||
*.pyc*
|
||||
@@ -52,34 +51,10 @@ ignored/
|
||||
.worktrees/
|
||||
environments/benchmarks/evals/
|
||||
|
||||
# Web UI build output
|
||||
hermes_cli/web_dist/
|
||||
apps/desktop/build/
|
||||
apps/desktop/dist/
|
||||
apps/desktop/release/
|
||||
apps/desktop/*.tsbuildinfo
|
||||
|
||||
# Web UI assets — synced from @nous-research/ui at build time via
|
||||
# `npm run sync-assets` (see web/package.json).
|
||||
web/public/fonts/
|
||||
web/public/ds-assets/
|
||||
|
||||
# Release script temp files
|
||||
.release_notes.md
|
||||
mini-swe-agent/
|
||||
|
||||
# Nix
|
||||
.direnv/
|
||||
.nix-stamps/
|
||||
result
|
||||
website/static/api/skills-index.json
|
||||
models-dev-upstream/
|
||||
|
||||
# Local editor / agent tooling (machine-specific; keep in global config, not the repo)
|
||||
.codex/
|
||||
.cursor/
|
||||
.gemini/
|
||||
.zed/
|
||||
.mcp.json
|
||||
opencode.json
|
||||
config/mcporter.json
|
||||
|
||||
108 .mailmap
@@ -1,108 +0,0 @@
|
||||
# .mailmap — canonical author mapping for git shortlog / git log / GitHub
|
||||
# Format: Canonical Name <canonical@email> <commit@email>
|
||||
# See: https://git-scm.com/docs/gitmailmap
|
||||
#
|
||||
# This maps commit emails to GitHub noreply addresses so that:
|
||||
# 1. `git shortlog -sn` shows deduplicated contributor counts
|
||||
# 2. GitHub's contributor graph can attribute commits correctly
|
||||
# 3. Contributors with personal/work emails get proper credit
|
||||
#
|
||||
# When adding entries: use the contributor's GitHub noreply email as canonical
|
||||
# so GitHub can link commits to their profile.
|
||||
|
||||
# === Teknium (multiple emails) ===
|
||||
Teknium <127238744+teknium1@users.noreply.github.com> <teknium1@gmail.com>
|
||||
Teknium <127238744+teknium1@users.noreply.github.com> <teknium@nousresearch.com>
|
||||
|
||||
# === Contributors — personal/work emails mapped to GitHub noreply ===
|
||||
# Format: Canonical Name <GH-noreply> <commit-email>
|
||||
|
||||
# Verified via GH API email search
|
||||
luyao618 <364939526@qq.com> <364939526@qq.com>
|
||||
ethernet8023 <arilotter@gmail.com> <arilotter@gmail.com>
|
||||
nicoloboschi <boschi1997@gmail.com> <boschi1997@gmail.com>
|
||||
cherifya <chef.ya@gmail.com> <chef.ya@gmail.com>
|
||||
BongSuCHOI <chlqhdtn98@gmail.com> <chlqhdtn98@gmail.com>
|
||||
dsocolobsky <dsocolobsky@gmail.com> <dsocolobsky@gmail.com>
|
||||
pefontana <fontana.pedro93@gmail.com> <fontana.pedro93@gmail.com>
|
||||
Helmi <frank@helmschrott.de> <frank@helmschrott.de>
|
||||
hata1234 <hata1234@gmail.com> <hata1234@gmail.com>
|
||||
|
||||
# Verified via PR investigation / salvage PR bodies
|
||||
DeployFaith <agents@kylefrench.dev> <agents@kylefrench.dev>
|
||||
flobo3 <floptopbot33@gmail.com> <floptopbot33@gmail.com>
|
||||
gaixianggeng <gaixg94@gmail.com> <gaixg94@gmail.com>
|
||||
KUSH42 <xush@xush.org> <xush@xush.org>
|
||||
konsisumer <der@konsi.org> <der@konsi.org>
|
||||
WorldInnovationsDepartment <vorvul.danylo@gmail.com> <vorvul.danylo@gmail.com>
|
||||
m0n5t3r <iacobs@m0n5t3r.info> <iacobs@m0n5t3r.info>
|
||||
sprmn24 <oncuevtv@gmail.com> <oncuevtv@gmail.com>
|
||||
fancydirty <fancydirty@gmail.com> <fancydirty@gmail.com>
|
||||
fxfitz <francis.x.fitzpatrick@gmail.com> <francis.x.fitzpatrick@gmail.com>
|
||||
limars874 <limars874@gmail.com> <limars874@gmail.com>
|
||||
AaronWong1999 <aaronwong1999@icloud.com> <aaronwong1999@icloud.com>
|
||||
dippwho <dipp.who@gmail.com> <dipp.who@gmail.com>
|
||||
duerzy <duerzy@gmail.com> <duerzy@gmail.com>
|
||||
geoffwellman <geoff.wellman@gmail.com> <geoff.wellman@gmail.com>
|
||||
hcshen0111 <shenhaocheng19990111@gmail.com> <shenhaocheng19990111@gmail.com>
|
||||
jamesarch <han.shan@live.cn> <han.shan@live.cn>
|
||||
stephenschoettler <stephenschoettler@gmail.com> <stephenschoettler@gmail.com>
|
||||
Tranquil-Flow <tranquil_flow@protonmail.com> <tranquil_flow@protonmail.com>
|
||||
Dusk1e <yusufalweshdemir@gmail.com> <yusufalweshdemir@gmail.com>
|
||||
Awsh1 <ysfalweshcan@gmail.com> <ysfalweshcan@gmail.com>
|
||||
WAXLYY <ysfwaxlycan@gmail.com> <ysfwaxlycan@gmail.com>
|
||||
donrhmexe <don.rhm@gmail.com> <don.rhm@gmail.com>
|
||||
hqhq1025 <1506751656@qq.com> <1506751656@qq.com>
|
||||
BlackishGreen33 <s5460703@gmail.com> <s5460703@gmail.com>
|
||||
tomqiaozc <zqiao@microsoft.com> <zqiao@microsoft.com>
|
||||
MagicRay1217 <mingjwan@microsoft.com> <mingjwan@microsoft.com>
|
||||
aaronagent <1115117931@qq.com> <1115117931@qq.com>
|
||||
YoungYang963 <young@YoungdeMacBook-Pro.local> <young@YoungdeMacBook-Pro.local>
|
||||
LongOddCode <haolong@microsoft.com> <haolong@microsoft.com>
|
||||
Cafexss <coffeemjj@gmail.com> <coffeemjj@gmail.com>
|
||||
Cygra <sjtuwbh@gmail.com> <sjtuwbh@gmail.com>
|
||||
DomGrieco <dgrieco@redhat.com> <dgrieco@redhat.com>
|
||||
|
||||
# Duplicate email mapping (same person, multiple emails)
|
||||
Sertug17 <104278804+Sertug17@users.noreply.github.com> <srhtsrht17@gmail.com>
|
||||
yyovil <birdiegyal@gmail.com> <tanishq231003@gmail.com>
|
||||
DomGrieco <dgrieco@redhat.com> <dgrieco@redhat.com>
|
||||
dsocolobsky <dsocolobsky@gmail.com> <dylan.socolobsky@lambdaclass.com>
|
||||
olafthiele <programming@olafthiele.com> <olafthiele@gmail.com>
|
||||
|
||||
# Verified via git display name matching GH contributor username
|
||||
cokemine <aptx4561@gmail.com> <aptx4561@gmail.com>
|
||||
dalianmao000 <dalianmao0107@gmail.com> <dalianmao0107@gmail.com>
|
||||
emozilla <emozilla@nousresearch.com> <emozilla@nousresearch.com>
|
||||
jjovalle99 <juan.ovalle@mistral.ai> <juan.ovalle@mistral.ai>
|
||||
kagura-agent <kagura.chen28@gmail.com> <kagura.chen28@gmail.com>
|
||||
spniyant <niyant@spicefi.xyz> <niyant@spicefi.xyz>
|
||||
olafthiele <programming@olafthiele.com> <programming@olafthiele.com>
|
||||
r266-tech <r2668940489@gmail.com> <r2668940489@gmail.com>
|
||||
xingkongliang <tianliangjay@gmail.com> <tianliangjay@gmail.com>
|
||||
win4r <win4r@outlook.com> <win4r@outlook.com>
|
||||
zhouboli <zhouboli@gmail.com> <zhouboli@gmail.com>
|
||||
yongtenglei <yongtenglei@gmail.com> <yongtenglei@gmail.com>
|
||||
|
||||
# Nous Research team
|
||||
benbarclay <ben@nousresearch.com> <ben@nousresearch.com>
|
||||
jquesnelle <jonny@nousresearch.com> <jonny@nousresearch.com>
|
||||
|
||||
# GH contributor list verified
|
||||
spideystreet <dhicham.pro@gmail.com> <dhicham.pro@gmail.com>
|
||||
dorukardahan <dorukardahan@hotmail.com> <dorukardahan@hotmail.com>
|
||||
MustafaKara7 <karamusti912@gmail.com> <karamusti912@gmail.com>
|
||||
Hmbown <hmbown@gmail.com> <hmbown@gmail.com>
|
||||
kamil-gwozdz <kamil@gwozdz.me> <kamil@gwozdz.me>
|
||||
kira-ariaki <kira@ariaki.me> <kira@ariaki.me>
|
||||
knopki <knopki@duck.com> <knopki@duck.com>
|
||||
Unayung <unayung@gmail.com> <unayung@gmail.com>
|
||||
SeeYangZhi <yangzhi.see@gmail.com> <yangzhi.see@gmail.com>
|
||||
Julientalbot <julien.talbot@ergonomia.re> <julien.talbot@ergonomia.re>
|
||||
lesterli <lisicheng168@gmail.com> <lisicheng168@gmail.com>
|
||||
JiayuuWang <jiayuw794@gmail.com> <jiayuw794@gmail.com>
|
||||
tesseracttars-creator <tesseracttars@gmail.com> <tesseracttars@gmail.com>
|
||||
xinbenlv <zzn+pa@zzn.im> <zzn+pa@zzn.im>
|
||||
SaulJWu <saul.jj.wu@gmail.com> <saul.jj.wu@gmail.com>
|
||||
angelos <angelos@oikos.lan.home.malaiwah.com> <angelos@oikos.lan.home.malaiwah.com>
|
||||
MestreY0d4-Uninter <241404605+MestreY0d4-Uninter@users.noreply.github.com> <MestreY0d4-Uninter@users.noreply.github.com>
|
||||
695 AGENTS.md
@@ -2,95 +2,68 @@
|
||||
|
||||
Instructions for AI coding assistants and developers working on the hermes-agent codebase.
|
||||
|
||||
**Never give up on the right solution.**
|
||||
|
||||
## Development Environment
|
||||
|
||||
```bash
|
||||
# Prefer .venv; fall back to venv if that's what your checkout has.
|
||||
source .venv/bin/activate # or: source venv/bin/activate
|
||||
source venv/bin/activate # ALWAYS activate before running Python
|
||||
```
|
||||
|
||||
`scripts/run_tests.sh` probes `.venv` first, then `venv`, then
|
||||
`$HOME/.hermes/hermes-agent/venv` (for worktrees that share a venv with the
|
||||
main checkout).
|
||||
|
||||
## Project Structure
|
||||
|
||||
File counts shift constantly — don't treat the tree below as exhaustive.
|
||||
The canonical source is the filesystem. The notes call out the load-bearing
|
||||
entry points you'll actually edit.
|
||||
|
||||
```
|
||||
hermes-agent/
|
||||
├── run_agent.py # AIAgent class — core conversation loop (~12k LOC)
|
||||
├── model_tools.py # Tool orchestration, discover_builtin_tools(), handle_function_call()
|
||||
├── run_agent.py # AIAgent class — core conversation loop
|
||||
├── model_tools.py # Tool orchestration, _discover_tools(), handle_function_call()
|
||||
├── toolsets.py # Toolset definitions, _HERMES_CORE_TOOLS list
|
||||
├── cli.py # HermesCLI class — interactive CLI orchestrator (~11k LOC)
|
||||
├── cli.py # HermesCLI class — interactive CLI orchestrator
|
||||
├── hermes_state.py # SessionDB — SQLite session store (FTS5 search)
|
||||
├── hermes_constants.py # get_hermes_home(), display_hermes_home() — profile-aware paths
|
||||
├── hermes_logging.py # setup_logging() — agent.log / errors.log / gateway.log (profile-aware)
|
||||
├── batch_runner.py # Parallel batch processing
|
||||
├── agent/ # Agent internals (provider adapters, memory, caching, compression, etc.)
|
||||
├── hermes_cli/ # CLI subcommands, setup wizard, plugins loader, skin engine
|
||||
├── tools/ # Tool implementations — auto-discovered via tools/registry.py
|
||||
├── agent/ # Agent internals
|
||||
│ ├── prompt_builder.py # System prompt assembly
|
||||
│ ├── context_compressor.py # Auto context compression
|
||||
│ ├── prompt_caching.py # Anthropic prompt caching
|
||||
│ ├── auxiliary_client.py # Auxiliary LLM client (vision, summarization)
|
||||
│ ├── model_metadata.py # Model context lengths, token estimation
|
||||
│ ├── models_dev.py # models.dev registry integration (provider-aware context)
|
||||
│ ├── display.py # KawaiiSpinner, tool preview formatting
|
||||
│ ├── skill_commands.py # Skill slash commands (shared CLI/gateway)
|
||||
│ └── trajectory.py # Trajectory saving helpers
|
||||
├── hermes_cli/ # CLI subcommands and setup
|
||||
│ ├── main.py # Entry point — all `hermes` subcommands
|
||||
│ ├── config.py # DEFAULT_CONFIG, OPTIONAL_ENV_VARS, migration
|
||||
│ ├── commands.py # Slash command definitions + SlashCommandCompleter
|
||||
│ ├── callbacks.py # Terminal callbacks (clarify, sudo, approval)
|
||||
│ ├── setup.py # Interactive setup wizard
|
||||
│ ├── skin_engine.py # Skin/theme engine — CLI visual customization
|
||||
│ ├── skills_config.py # `hermes skills` — enable/disable skills per platform
|
||||
│ ├── tools_config.py # `hermes tools` — enable/disable tools per platform
|
||||
│ ├── skills_hub.py # `/skills` slash command (search, browse, install)
|
||||
│ ├── models.py # Model catalog, provider model lists
|
||||
│ ├── model_switch.py # Shared /model switch pipeline (CLI + gateway)
|
||||
│ └── auth.py # Provider credential resolution
|
||||
├── tools/ # Tool implementations (one file per tool)
|
||||
│ ├── registry.py # Central tool registry (schemas, handlers, dispatch)
|
||||
│ ├── approval.py # Dangerous command detection
|
||||
│ ├── terminal_tool.py # Terminal orchestration
|
||||
│ ├── process_registry.py # Background process management
|
||||
│ ├── file_tools.py # File read/write/search/patch
|
||||
│ ├── web_tools.py # Web search/extract (Parallel + Firecrawl)
|
||||
│ ├── browser_tool.py # Browserbase browser automation
|
||||
│ ├── code_execution_tool.py # execute_code sandbox
|
||||
│ ├── delegate_tool.py # Subagent delegation
|
||||
│ ├── mcp_tool.py # MCP client (~1050 lines)
|
||||
│ └── environments/ # Terminal backends (local, docker, ssh, modal, daytona, singularity)
|
||||
├── gateway/ # Messaging gateway — run.py + session.py + platforms/
|
||||
│ ├── platforms/ # Adapter per platform (telegram, discord, slack, whatsapp,
|
||||
│ │ # homeassistant, signal, matrix, mattermost, email, sms,
|
||||
│ │ # dingtalk, wecom, weixin, feishu, qqbot, bluebubbles,
|
||||
│ │ # yuanbao, webhook, api_server, ...). See ADDING_A_PLATFORM.md.
|
||||
│ └── builtin_hooks/ # Extension point for always-registered gateway hooks (none shipped)
|
||||
├── plugins/ # Plugin system (see "Plugins" section below)
|
||||
│ ├── memory/ # Memory-provider plugins (honcho, mem0, supermemory, ...)
|
||||
│ ├── context_engine/ # Context-engine plugins
|
||||
│ ├── model-providers/ # Inference backend plugins (openrouter, anthropic, gmi, ...)
|
||||
│ ├── kanban/ # Multi-agent board dispatcher + worker plugin
|
||||
│ ├── hermes-achievements/ # Gamified achievement tracking
|
||||
│ ├── observability/ # Metrics / traces / logs plugin
|
||||
│ ├── image_gen/ # Image-generation providers
|
||||
│ └── <others>/ # disk-cleanup, example-dashboard, google_meet, platforms,
|
||||
│ # spotify, strike-freedom-cockpit, ...
|
||||
├── optional-skills/ # Heavier/niche skills shipped but NOT active by default
|
||||
├── skills/ # Built-in skills bundled with the repo
|
||||
├── ui-tui/ # Ink (React) terminal UI — `hermes --tui`
|
||||
│ └── src/ # entry.tsx, app.tsx, gatewayClient.ts + app/components/hooks/lib
|
||||
├── tui_gateway/ # Python JSON-RPC backend for the TUI
|
||||
├── gateway/ # Messaging platform gateway
|
||||
│ ├── run.py # Main loop, slash commands, message dispatch
|
||||
│ ├── session.py # SessionStore — conversation persistence
|
||||
│ └── platforms/ # Adapters: telegram, discord, slack, whatsapp, homeassistant, signal
|
||||
├── acp_adapter/ # ACP server (VS Code / Zed / JetBrains integration)
|
||||
├── cron/ # Scheduler — jobs.py, scheduler.py
|
||||
├── cron/ # Scheduler (jobs.py, scheduler.py)
|
||||
├── environments/ # RL training environments (Atropos)
|
||||
├── scripts/ # run_tests.sh, release.py, auxiliary scripts
|
||||
├── website/ # Docusaurus docs site
|
||||
└── tests/ # Pytest suite (~17k tests across ~900 files as of May 2026)
|
||||
├── tests/ # Pytest suite (~3000 tests)
|
||||
└── batch_runner.py # Parallel batch processing
|
||||
```
|
||||
|
||||
**User config:** `~/.hermes/config.yaml` (settings), `~/.hermes/.env` (API keys only).
|
||||
**Logs:** `~/.hermes/logs/` — `agent.log` (INFO+), `errors.log` (WARNING+),
|
||||
`gateway.log` when running the gateway. Profile-aware via `get_hermes_home()`.
|
||||
Browse with `hermes logs [--follow] [--level ...] [--session ...]`.
|
||||
|
||||
## TypeScript Style
|
||||
|
||||
Applies to TypeScript across Hermes: desktop, TUI, website, and future TS packages.
|
||||
|
||||
- Prefer small nanostores over component state when state is shared, reused, or read by distant UI.
|
||||
- Let each feature own its atoms. Chat state belongs near chat, shell state near shell, shared state in `src/store`.
|
||||
- Components that render from an atom should use `useStore`. Non-rendering actions should read with `$atom.get()`.
|
||||
- Do not pass state through three components when the leaf can subscribe to the atom.
|
||||
- Keep persistence beside the atom that owns it.
|
||||
- Keep route roots thin. They compose routes and shell; they should not become controllers.
|
||||
- No monolithic hooks. A hook should own one narrow job.
|
||||
- Prefer colocated action modules over hidden god hooks.
|
||||
- If a callback is pure side effect, use the terse void form:
|
||||
`onState={st => void setGatewayState(st)}`.
|
||||
- Async UI handlers should make intent explicit:
|
||||
`onClick={() => void save()}`.
|
||||
- Prefer interfaces for public props and shared object shapes. Avoid `type X = { ... }` for object props.
|
||||
- Extend React primitives for props: `React.ComponentProps<'button'>`, `React.ComponentProps<typeof Dialog>`, `Omit<...>`, `Pick<...>`.
|
||||
- Table-driven beats condition ladders when mapping ids, routes, or views.
|
||||
- `src/app` owns routes, pages, and page-specific components.
|
||||
- `src/store` owns shared atoms.
|
||||
- `src/lib` owns shared pure helpers.
|
||||
**User config:** `~/.hermes/config.yaml` (settings), `~/.hermes/.env` (API keys)
|
||||
|
||||
## File Dependency Chain
|
||||
|
||||
@@ -108,30 +81,20 @@ run_agent.py, cli.py, batch_runner.py, environments/
|
||||
|
||||
## AIAgent Class (run_agent.py)
|
||||
|
||||
The real `AIAgent.__init__` takes ~60 parameters (credentials, routing, callbacks,
|
||||
session context, budget, credential pool, etc.). The signature below is the
|
||||
minimum subset you'll usually touch — read `run_agent.py` for the full list.
|
||||
|
||||
```python
|
||||
class AIAgent:
|
||||
def __init__(self,
|
||||
base_url: str = None,
|
||||
api_key: str = None,
|
||||
provider: str = None,
|
||||
api_mode: str = None, # "chat_completions" | "codex_responses" | ...
|
||||
model: str = "", # empty → resolved from config/provider later
|
||||
max_iterations: int = 90, # tool-calling iterations (shared with subagents)
|
||||
model: str = "anthropic/claude-opus-4.6",
|
||||
max_iterations: int = 90,
|
||||
enabled_toolsets: list = None,
|
||||
disabled_toolsets: list = None,
|
||||
quiet_mode: bool = False,
|
||||
save_trajectories: bool = False,
|
||||
platform: str = None, # "cli", "telegram", etc.
|
||||
platform: str = None, # "cli", "telegram", etc.
|
||||
session_id: str = None,
|
||||
skip_context_files: bool = False,
|
||||
skip_memory: bool = False,
|
||||
credential_pool=None,
|
||||
# ... plus callbacks, thread/user/chat IDs, iteration_budget, fallback_model,
|
||||
# checkpoints config, prefill_messages, service_tier, reasoning_config, etc.
|
||||
# ... plus provider, api_mode, callbacks, routing params
|
||||
): ...
|
||||
|
||||
def chat(self, message: str) -> str:
|
||||
@@ -144,13 +107,10 @@ class AIAgent:
|
||||
|
||||
### Agent Loop
|
||||
|
||||
The core loop is inside `run_conversation()` — entirely synchronous, with
|
||||
interrupt checks, budget tracking, and a one-turn grace call:
|
||||
The core loop is inside `run_conversation()` — entirely synchronous:
|
||||
|
||||
```python
|
||||
while (api_call_count < self.max_iterations and self.iteration_budget.remaining > 0) \
|
||||
or self._budget_grace_call:
|
||||
if self._interrupt_requested: break
|
||||
while api_call_count < self.max_iterations and self.iteration_budget.remaining > 0:
|
||||
response = client.chat.completions.create(model=model, messages=messages, tools=tool_schemas)
|
||||
if response.tool_calls:
|
||||
for tool_call in response.tool_calls:
|
||||
@@ -161,8 +121,7 @@ while (api_call_count < self.max_iterations and self.iteration_budget.remaining
|
||||
return response.content
|
||||
```
|
||||
|
||||
Messages follow OpenAI format: `{"role": "system/user/assistant/tool", ...}`.
|
||||
Reasoning content is stored in `assistant_msg["reasoning"]`.
|
||||
Messages follow OpenAI format: `{"role": "system/user/assistant/tool", ...}`. Reasoning content is stored in `assistant_msg["reasoning"]`.
|
||||
|
||||
---
|
||||
|
||||
@@ -220,84 +179,9 @@ if canonical == "mycommand":
|
||||
|
||||
---
|
||||
|
||||
## TUI Architecture (ui-tui + tui_gateway)
|
||||
|
||||
The TUI is a full replacement for the classic (prompt_toolkit) CLI, activated via `hermes --tui` or `HERMES_TUI=1`.
|
||||
|
||||
### Process Model
|
||||
|
||||
```
|
||||
hermes --tui
|
||||
└─ Node (Ink) ──stdio JSON-RPC── Python (tui_gateway)
|
||||
│ └─ AIAgent + tools + sessions
|
||||
└─ renders transcript, composer, prompts, activity
|
||||
```
|
||||
|
||||
TypeScript owns the screen. Python owns sessions, tools, model calls, and slash command logic.
|
||||
|
||||
### Transport
|
||||
|
||||
Newline-delimited JSON-RPC over stdio. Requests from Ink, events from Python. See `tui_gateway/server.py` for the full method/event catalog.
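A minimal sketch of that framing, assuming only the `prompt.submit` and `message.delta` names from the surface table below; the payload shapes, error handling, and everything else here are placeholders, and `tui_gateway/server.py` remains the source of truth.

```python
import json
import sys

def emit(obj):
    """Write one frame: a JSON object, newline-terminated, flushed immediately."""
    sys.stdout.write(json.dumps(obj) + "\n")
    sys.stdout.flush()

def serve():
    """Read newline-delimited JSON-RPC requests from stdin and answer on stdout."""
    for line in sys.stdin:
        req = json.loads(line)
        if req.get("method") == "prompt.submit":        # name from the surface table
            emit({"jsonrpc": "2.0", "method": "message.delta",
                  "params": {"text": "...streamed chunk..."}})   # event: no id
            emit({"jsonrpc": "2.0", "id": req["id"],
                  "result": {"status": "complete"}})             # reply to the request
        else:
            emit({"jsonrpc": "2.0", "id": req.get("id"),
                  "error": {"code": -32601, "message": "method not found"}})

if __name__ == "__main__":
    serve()
```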
|
||||
|
||||
### Key Surfaces
|
||||
|
||||
| Surface | Ink component | Gateway method |
|
||||
|---------|---------------|----------------|
|
||||
| Chat streaming | `app.tsx` + `messageLine.tsx` | `prompt.submit` → `message.delta/complete` |
|
||||
| Tool activity | `thinking.tsx` | `tool.start/progress/complete` |
|
||||
| Approvals | `prompts.tsx` | `approval.respond` ← `approval.request` |
|
||||
| Clarify/sudo/secret | `prompts.tsx`, `maskedPrompt.tsx` | `clarify/sudo/secret.respond` |
|
||||
| Session picker | `sessionPicker.tsx` | `session.list/resume` |
|
||||
| Slash commands | Local handler + fallthrough | `slash.exec` → `_SlashWorker`, `command.dispatch` |
|
||||
| Completions | `useCompletion` hook | `complete.slash`, `complete.path` |
|
||||
| Theming | `theme.ts` + `branding.tsx` | `gateway.ready` with skin data |
|
||||
|
||||
### Slash Command Flow
|
||||
|
||||
1. Built-in client commands (`/help`, `/quit`, `/clear`, `/resume`, `/copy`, `/paste`, etc.) handled locally in `app.tsx`
|
||||
2. Everything else → `slash.exec` (runs in persistent `_SlashWorker` subprocess) → `command.dispatch` fallback
|
||||
|
||||
### Dev Commands
|
||||
|
||||
```bash
|
||||
cd ui-tui
|
||||
npm install # first time
|
||||
npm run dev # watch mode (rebuilds hermes-ink + tsx --watch)
|
||||
npm start # production
|
||||
npm run build # full build (hermes-ink + tsc)
|
||||
npm run type-check # typecheck only (tsc --noEmit)
|
||||
npm run lint # eslint
|
||||
npm run fmt # prettier
|
||||
npm test # vitest
|
||||
```
|
||||
|
||||
### TUI in the Dashboard (`hermes dashboard` → `/chat`)
|
||||
|
||||
The dashboard embeds the real `hermes --tui` — **not** a rewrite. See `hermes_cli/pty_bridge.py` + the `@app.websocket("/api/pty")` endpoint in `hermes_cli/web_server.py`.
|
||||
|
||||
- Browser loads `apps/dashboard/src/pages/ChatPage.tsx`, which mounts xterm.js's `Terminal` with the WebGL renderer, `@xterm/addon-fit` for container-driven resize, and `@xterm/addon-unicode11` for modern wide-character widths.
|
||||
- `/api/pty?token=…` upgrades to a WebSocket; auth uses the same ephemeral `_SESSION_TOKEN` as REST, via query param (browsers can't set `Authorization` on WS upgrade).
|
||||
- The server spawns whatever `hermes --tui` would spawn, through `ptyprocess` (POSIX PTY — WSL works, native Windows does not).
|
||||
- Frames: raw PTY bytes each direction; resize via `\x1b[RESIZE:<cols>;<rows>]` intercepted on the server and applied with `TIOCSWINSZ`.
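A rough sketch of that interception (not the actual `pty_bridge.py` code; the regex and function name are illustrative): scan incoming bytes for the resize frame, apply it with `TIOCSWINSZ`, and forward the rest to the PTY.

```python
import fcntl
import os
import re
import struct
import termios

RESIZE_RE = re.compile(rb"\x1b\[RESIZE:(\d+);(\d+)\]")

def forward_to_pty(pty_fd: int, data: bytes) -> None:
    """Apply any embedded resize frames, then pass the remaining bytes through."""
    for match in RESIZE_RE.finditer(data):
        cols, rows = int(match.group(1)), int(match.group(2))
        # struct winsize: ws_row, ws_col, ws_xpixel, ws_ypixel
        fcntl.ioctl(pty_fd, termios.TIOCSWINSZ, struct.pack("HHHH", rows, cols, 0, 0))
    os.write(pty_fd, RESIZE_RE.sub(b"", data))
```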
|
||||
|
||||
**Do not re-implement the primary chat experience in React.** The main transcript, composer/input flow (including slash-command behavior), and PTY-backed terminal belong to the embedded `hermes --tui` — anything new you add to Ink shows up in the dashboard automatically. If you find yourself rebuilding the transcript or composer for the dashboard, stop and extend Ink instead.
|
||||
|
||||
**Structured React UI around the TUI is allowed when it is not a second chat surface.** Sidebar widgets, inspectors, summaries, status panels, and similar supporting views (e.g. `ChatSidebar`, `ModelPickerDialog`, `ToolCall`) are fine when they complement the embedded TUI rather than replacing the transcript / composer / terminal. Keep their state independent of the PTY child's session and surface their failures non-destructively so the terminal pane keeps working unimpaired.
|
||||
|
||||
---
|
||||
|
||||
## Adding New Tools
|
||||
|
||||
For most custom or local-only tools, do **not** edit Hermes core. Use the plugin
|
||||
route instead: create `~/.hermes/plugins/<name>/plugin.yaml` and
|
||||
`~/.hermes/plugins/<name>/__init__.py`, then register tools with
|
||||
`ctx.register_tool(...)`. Plugin toolsets are discovered automatically and can be
|
||||
enabled or disabled without touching `tools/` or `toolsets.py`.
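A hedged sketch of what that can look like; the `register(ctx)` entry-point name and the exact `register_tool` keyword arguments are assumptions, so treat the shipped plugins under `plugins/` as the reference.

```python
# Hypothetical ~/.hermes/plugins/weather/__init__.py
import json

def _get_weather(city: str) -> str:
    # Tool handlers return a JSON string.
    return json.dumps({"city": city, "forecast": "sunny"})

def register(ctx):
    """Called by the plugins loader with a plugin context object."""
    ctx.register_tool(
        name="get_weather",
        description="Return a toy forecast for a city.",
        parameters={
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
        handler=_get_weather,
    )
```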
|
||||
|
||||
Use the built-in route below only when the user is explicitly contributing a new
|
||||
core Hermes tool that should ship in the base system.
|
||||
|
||||
Built-in/core tools require changes in **2 files**:
|
||||
Requires changes in **3 files**:
|
||||
|
||||
**1. Create `tools/your_tool.py`:**
|
||||
```python
|
||||
@@ -320,9 +204,9 @@ registry.register(
|
||||
)
|
||||
```
|
||||
|
||||
**2. Add to `toolsets.py`** — either `_HERMES_CORE_TOOLS` (all platforms) or a new toolset. **This step is required:** auto-discovery imports the tool and registers its schema, but the tool is only *exposed to an agent* if its name appears in a toolset. `_HERMES_CORE_TOOLS` is not dead code — it's the default bundle every platform's base toolset inherits from.
|
||||
**2. Add import** in `model_tools.py` `_discover_tools()` list.
|
||||
|
||||
Auto-discovery: any `tools/*.py` file with a top-level `registry.register()` call is imported automatically — no manual import list to maintain. Wiring into a toolset is still a deliberate, manual step.
|
||||
**3. Add to `toolsets.py`** — either `_HERMES_CORE_TOOLS` (all platforms) or a new toolset.
|
||||
|
||||
The registry handles schema collection, dispatch, availability checking, and error wrapping. All handlers MUST return a JSON string.
|
||||
|
||||
@@ -330,7 +214,7 @@ The registry handles schema collection, dispatch, availability checking, and err
|
||||
|
||||
**State files**: If a tool stores persistent state (caches, logs, checkpoints), use `get_hermes_home()` for the base directory — never `Path.home() / ".hermes"`. This ensures each profile gets its own state.
|
||||
|
||||
**Agent-level tools** (todo, memory): intercepted by `run_agent.py` before `handle_function_call()`. See `tools/todo_tool.py` for the pattern.
|
||||
**Agent-level tools** (todo, memory): intercepted by `run_agent.py` before `handle_function_call()`. See `todo_tool.py` for the pattern.
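Roughly, the interception is a name check before registry dispatch. The helper names below are hypothetical; `run_agent.py` and `tools/todo_tool.py` are the source of truth.

```python
AGENT_LEVEL_TOOLS = {"todo", "memory"}   # assumed set, for illustration only

def dispatch_tool_call(agent, tool_call, handle_function_call):
    name = tool_call["function"]["name"]
    if name in AGENT_LEVEL_TOOLS:
        # Handled against the agent's own in-memory state; never reaches the registry.
        return agent.handle_agent_level_tool(tool_call)     # hypothetical method
    return handle_function_call(tool_call)                   # normal registry dispatch
```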
|
||||
|
||||
---
|
||||
|
||||
@@ -338,29 +222,9 @@ The registry handles schema collection, dispatch, availability checking, and err
|
||||
|
||||
### config.yaml options:
|
||||
1. Add to `DEFAULT_CONFIG` in `hermes_cli/config.py`
|
||||
2. Bump `_config_version` (check the current value at the top of `DEFAULT_CONFIG`)
|
||||
ONLY if you need to actively migrate/transform existing user config
|
||||
(renaming keys, changing structure). Adding a new key to an existing
|
||||
section is handled automatically by the deep-merge and does NOT require
|
||||
a version bump.
|
||||
2. Bump `_config_version` (currently 5) to trigger migration for existing users
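Sketch of the common case: adding a key to an existing section, which the deep-merge picks up without a version bump. Section and key names below are illustrative only.

```python
# hermes_cli/config.py (sketch)
DEFAULT_CONFIG = {
    # ... existing sections ...
    "display": {
        # ... existing keys ...
        "show_turn_timer": False,   # hypothetical new key; no _config_version bump needed
    },
}
```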
|
||||
|
||||
### Top-level `config.yaml` sections (non-exhaustive):
|
||||
|
||||
`model`, `agent`, `terminal`, `compression`, `display`, `stt`, `tts`,
|
||||
`memory`, `security`, `delegation`, `smart_model_routing`, `checkpoints`,
|
||||
`auxiliary`, `curator`, `skills`, `gateway`, `logging`, `cron`, `profiles`,
|
||||
`plugins`, `honcho`.
|
||||
|
||||
`auxiliary` holds per-task overrides for side-LLM work (curator, vision,
|
||||
embedding, title generation, session_search, etc.) — each task can pin
|
||||
its own provider/model/base_url/max_tokens/reasoning_effort. See
|
||||
`agent/auxiliary_client.py::_resolve_auto` for resolution order.
|
||||
|
||||
`curator` holds the background skill-maintenance config —
|
||||
`enabled`, `interval_hours`, `min_idle_hours`, `stale_after_days`,
|
||||
`archive_after_days`, `backup` (nested).
|
||||
|
||||
### .env variables (SECRETS ONLY — API keys, tokens, passwords):
|
||||
### .env variables:
|
||||
1. Add to `OPTIONAL_ENV_VARS` in `hermes_cli/config.py` with metadata:
|
||||
```python
|
||||
"NEW_API_KEY": {
|
||||
@@ -372,29 +236,13 @@ its own provider/model/base_url/max_tokens/reasoning_effort. See
|
||||
},
|
||||
```
|
||||
|
||||
Non-secret settings (timeouts, thresholds, feature flags, paths, display
|
||||
preferences) belong in `config.yaml`, not `.env`. If internal code needs an
|
||||
env var mirror for backward compatibility, bridge it from `config.yaml` to
|
||||
the env var in code (see `gateway_timeout`, `terminal.cwd` → `TERMINAL_CWD`).
|
||||
|
||||
### Config loaders (three paths — know which one you're in):
|
||||
### Config loaders (two separate systems):
|
||||
|
||||
| Loader | Used by | Location |
|
||||
|--------|---------|----------|
|
||||
| `load_cli_config()` | CLI mode | `cli.py` — merges CLI-specific defaults + user YAML |
|
||||
| `load_config()` | `hermes tools`, `hermes setup`, most CLI subcommands | `hermes_cli/config.py` — merges `DEFAULT_CONFIG` + user YAML |
|
||||
| Direct YAML load | Gateway runtime | `gateway/run.py` + `gateway/config.py` — reads user YAML raw |
|
||||
|
||||
If you add a new key and the CLI sees it but the gateway doesn't (or vice
|
||||
versa), you're on the wrong loader. Check `DEFAULT_CONFIG` coverage.
|
||||
|
||||
### Working directory:
|
||||
- **CLI** — uses the process's current directory (`os.getcwd()`).
|
||||
- **Messaging** — uses `terminal.cwd` from `config.yaml`. The gateway bridges this
|
||||
to the `TERMINAL_CWD` env var for child tools. **`MESSAGING_CWD` has been
|
||||
removed** — the config loader prints a deprecation warning if it's set in
|
||||
`.env`. Same for `TERMINAL_CWD` in `.env`; the canonical setting is
|
||||
`terminal.cwd` in `config.yaml`.
|
||||
| `load_cli_config()` | CLI mode | `cli.py` |
|
||||
| `load_config()` | `hermes tools`, `hermes setup` | `hermes_cli/config.py` |
|
||||
| Direct YAML load | Gateway | `gateway/run.py` |
|
||||
|
||||
---
|
||||
|
||||
@@ -487,285 +335,7 @@ Activate with `/skin cyberpunk` or `display.skin: cyberpunk` in config.yaml.
|
||||
|
||||
---
|
||||
|
||||
## Plugins
|
||||
|
||||
Hermes has several plugin surfaces. They live under `plugins/` in the repo so
|
||||
repo-shipped plugins can be discovered alongside user-installed ones in
|
||||
`~/.hermes/plugins/` and pip-installed entry points.
|
||||
|
||||
### General plugins (`hermes_cli/plugins.py` + `plugins/<name>/`)
|
||||
|
||||
`PluginManager` discovers plugins from `~/.hermes/plugins/`, `./.hermes/plugins/`,
|
||||
and pip entry points. Each plugin exposes a `register(ctx)` function that
|
||||
can:
|
||||
|
||||
- Register Python-callback lifecycle hooks:
|
||||
`pre_tool_call`, `post_tool_call`, `pre_llm_call`, `post_llm_call`,
|
||||
`on_session_start`, `on_session_end`
|
||||
- Register new tools via `ctx.register_tool(...)`
|
||||
- Register CLI subcommands via `ctx.register_cli_command(...)` — the
|
||||
plugin's argparse tree is wired into `hermes` at startup so
|
||||
`hermes <pluginname> <subcmd>` works with no change to `main.py`
|
||||
|
||||
Hooks are invoked from `model_tools.py` (pre/post tool) and `run_agent.py`
|
||||
(lifecycle). **Discovery timing pitfall:** `discover_plugins()` only runs
|
||||
as a side effect of importing `model_tools.py`. Code paths that read plugin
|
||||
state without importing `model_tools.py` first must call `discover_plugins()`
|
||||
explicitly (it's idempotent).
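A minimal `register(ctx)` sketch. The hook names and `ctx.register_tool(...)` come from the list above; the exact keyword arguments are assumptions, so check `hermes_cli/plugins.py` for the real signatures.

```python
# ~/.hermes/plugins/hello/__init__.py (sketch)
import json


def register(ctx):
    def handle_hello(name: str) -> str:
        # Tool handlers return a JSON string, same contract as core tools.
        return json.dumps({"greeting": f"Hello, {name}!"})

    ctx.register_tool(
        name="hello",                     # hypothetical tool
        description="Greet someone by name.",
        parameters={
            "type": "object",
            "properties": {"name": {"type": "string"}},
            "required": ["name"],
        },
        handler=handle_hello,
    )
```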
|
||||
|
||||
### Memory-provider plugins (`plugins/memory/<name>/`)
|
||||
|
||||
Separate discovery system for pluggable memory backends. Current built-in
|
||||
providers include **honcho, mem0, supermemory, byterover, hindsight,
|
||||
holographic, openviking, retaindb**.
|
||||
|
||||
Each provider implements the `MemoryProvider` ABC (see `agent/memory_provider.py`)
|
||||
and is orchestrated by `agent/memory_manager.py`. Lifecycle hooks include
|
||||
`sync_turn(turn_messages)`, `prefetch(query)`, `shutdown()`, and optional
|
||||
`post_setup(hermes_home, config)` for setup-wizard integration.
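A provider skeleton, assuming the lifecycle hooks above map directly onto methods of the ABC (the constructor and any other required methods are omitted; see `agent/memory_provider.py`):

```python
# plugins/memory/mybackend/__init__.py (sketch)
from agent.memory_provider import MemoryProvider


class MyBackendProvider(MemoryProvider):
    def sync_turn(self, turn_messages):
        ...  # persist the finished turn to the backend

    def prefetch(self, query):
        return []  # memories to surface for the upcoming turn

    def shutdown(self):
        pass  # flush and close connections

    def post_setup(self, hermes_home, config):
        pass  # optional setup-wizard hook
```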
|
||||
|
||||
**CLI commands via `plugins/memory/<name>/cli.py`:** if a memory plugin
|
||||
defines `register_cli(subparser)`, `discover_plugin_cli_commands()` finds
|
||||
it at argparse setup time and wires it into `hermes <plugin>`. The
|
||||
framework only exposes CLI commands for the **currently active** memory
|
||||
provider (read from `memory.provider` in config.yaml), so disabled
|
||||
providers don't clutter `hermes --help`.
|
||||
|
||||
**Rule (Teknium, May 2026):** plugins MUST NOT modify core files
|
||||
(`run_agent.py`, `cli.py`, `gateway/run.py`, `hermes_cli/main.py`, etc.).
|
||||
If a plugin needs a capability the framework doesn't expose, expand the
|
||||
generic plugin surface (new hook, new ctx method) — never hardcode
|
||||
plugin-specific logic into core. PR #5295 removed 95 lines of hardcoded
|
||||
honcho argparse from `main.py` for exactly this reason.
|
||||
|
||||
### Model-provider plugins (`plugins/model-providers/<name>/`)
|
||||
|
||||
Every inference backend (openrouter, anthropic, gmi, deepseek, nvidia, …)
|
||||
ships as a plugin here. Each plugin's `__init__.py` calls
|
||||
`providers.register_provider(ProviderProfile(...))` at module load.
|
||||
`providers/__init__.py._discover_providers()` is a **lazy, separate
|
||||
discovery system** — scanned on first `get_provider_profile()` or
|
||||
`list_providers()` call, NOT by the general PluginManager.
|
||||
|
||||
Scan order:
|
||||
1. Bundled: `<repo>/plugins/model-providers/<name>/`
|
||||
2. User: `$HERMES_HOME/plugins/model-providers/<name>/`
|
||||
3. Legacy: `<repo>/providers/<name>.py` (back-compat)
|
||||
|
||||
User plugins of the same name override bundled ones — `register_provider()`
|
||||
is last-writer-wins. This lets third parties swap out any built-in
|
||||
profile without a repo patch.
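Sketch of a provider plugin's `__init__.py`. `register_provider` and `ProviderProfile` are the names used above; the import path and the specific `ProviderProfile` fields are assumptions (see the authoring guide linked below):

```python
# plugins/model-providers/acme/__init__.py (sketch)
from providers import ProviderProfile, register_provider  # assumed import path

register_provider(
    ProviderProfile(
        name="acme",                               # hypothetical provider
        base_url="https://api.acme.example/v1",
        api_key_env="ACME_API_KEY",
        default_model="acme-large",
    )
)
```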
|
||||
|
||||
The general PluginManager records `kind: model-provider` manifests but does
|
||||
NOT import them (would double-instantiate `ProviderProfile`). Plugins
|
||||
without an explicit `kind:` get auto-coerced via a source-text heuristic
|
||||
(`register_provider` + `ProviderProfile` in `__init__.py`).
|
||||
|
||||
Full authoring guide: `website/docs/developer-guide/model-provider-plugin.md`.
|
||||
|
||||
### Dashboard / context-engine / image-gen plugin directories
|
||||
|
||||
`plugins/context_engine/`, `plugins/image_gen/`, `plugins/example-dashboard/`,
|
||||
etc. follow the same pattern (ABC + orchestrator + per-plugin directory).
|
||||
Context engines plug into `agent/context_engine.py`; image-gen providers
|
||||
into `agent/image_gen_provider.py`.
|
||||
|
||||
---
|
||||
|
||||
## Skills
|
||||
|
||||
Two parallel surfaces:
|
||||
|
||||
- **`skills/`** — built-in skills shipped and loadable by default.
|
||||
Organized by category directories (e.g. `skills/github/`, `skills/mlops/`).
|
||||
- **`optional-skills/`** — heavier or niche skills shipped with the repo but
|
||||
NOT active by default. Installed explicitly via
|
||||
`hermes skills install official/<category>/<skill>`. Adapter lives in
|
||||
`tools/skills_hub.py` (`OptionalSkillSource`). Categories include
|
||||
`autonomous-ai-agents`, `blockchain`, `communication`, `creative`,
|
||||
`devops`, `email`, `health`, `mcp`, `migration`, `mlops`, `productivity`,
|
||||
`research`, `security`, `web-development`.
|
||||
|
||||
When reviewing skill PRs, check which directory they target — heavy-dep or
|
||||
niche skills belong in `optional-skills/`.
|
||||
|
||||
### SKILL.md frontmatter
|
||||
|
||||
Standard fields: `name`, `description`, `version`, `author`, `license`,
|
||||
`platforms` (OS-gating list: `[macos]`, `[linux, macos]`, ...),
|
||||
`metadata.hermes.tags`, `metadata.hermes.category`,
|
||||
`metadata.hermes.related_skills`, `metadata.hermes.config` (config.yaml
|
||||
settings the skill needs — stored under `skills.config.<key>`, prompted
|
||||
during setup, injected at load time).
|
||||
|
||||
Top-level `tags:` and `category:` are also accepted and mirrored from
|
||||
`metadata.hermes.*` by the loader.
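Putting those fields together, a frontmatter sketch (field names from the list above; every value is illustrative):

```yaml
name: pdf-report
description: Generate a weekly PDF report from a data directory.
version: 0.1.0
author: your-name
license: MIT
platforms: [linux, macos]
metadata:
  hermes:
    tags: [reporting, pdf]
    category: productivity
    related_skills: [data-cleanup]
    config:
      report_output_dir: ~/reports   # stored under skills.config.<key>
```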
|
||||
|
||||
---
|
||||
|
||||
## Toolsets
|
||||
|
||||
All toolsets are defined in `toolsets.py` as a single `TOOLSETS` dict.
|
||||
Each platform's adapter picks a base toolset (e.g. Telegram uses
|
||||
`"messaging"`); `_HERMES_CORE_TOOLS` is the default bundle most
|
||||
platforms inherit from.
|
||||
|
||||
Current toolset keys: `browser`, `clarify`, `code_execution`, `cronjob`,
|
||||
`debugging`, `delegation`, `discord`, `discord_admin`, `feishu_doc`,
|
||||
`feishu_drive`, `file`, `homeassistant`, `image_gen`, `kanban`, `memory`,
|
||||
`messaging`, `moa`, `rl`, `safe`, `search`, `session_search`, `skills`,
|
||||
`spotify`, `terminal`, `todo`, `tts`, `video`, `vision`, `web`, `yuanbao`.
|
||||
|
||||
Enable/disable per platform via `hermes tools` (the curses UI) or the
|
||||
`tools.<platform>.enabled` / `tools.<platform>.disabled` lists in
|
||||
`config.yaml`.
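For example, in `config.yaml` (platform name and toolset choices are illustrative):

```yaml
tools:
  telegram:
    enabled: [browser, image_gen]
    disabled: [spotify]
```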
|
||||
|
||||
---
|
||||
|
||||
## Delegation (`delegate_task`)
|
||||
|
||||
`tools/delegate_tool.py` spawns a subagent with an isolated
|
||||
context + terminal session. Synchronous: the parent waits for the
|
||||
child's summary before continuing its own loop — if the parent is
|
||||
interrupted, the child is cancelled.
|
||||
|
||||
Two shapes:
|
||||
|
||||
- **Single:** pass `goal` (+ optional `context`, `toolsets`).
|
||||
- **Batch (parallel):** pass `tasks: [...]` — each gets its own subagent
|
||||
running concurrently. Concurrency is capped by
|
||||
`delegation.max_concurrent_children` (default 3).
|
||||
|
||||
Roles:
|
||||
|
||||
- `role="leaf"` (default) — focused worker. Cannot call `delegate_task`,
|
||||
`clarify`, `memory`, `send_message`, `execute_code`.
|
||||
- `role="orchestrator"` — retains `delegate_task` so it can spawn its
|
||||
own workers. Gated by `delegation.orchestrator_enabled` (default true)
|
||||
and bounded by `delegation.max_spawn_depth` (default 2).
|
||||
|
||||
Key config knobs (under `delegation:` in `config.yaml`):
|
||||
`max_concurrent_children`, `max_spawn_depth`, `child_timeout_seconds`,
|
||||
`orchestrator_enabled`, `subagent_auto_approve`, `inherit_mcp_toolsets`,
|
||||
`max_iterations`.
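A `config.yaml` sketch of those knobs. Only the values marked as documented defaults come from the text above; the rest are illustrative:

```yaml
delegation:
  max_concurrent_children: 3    # documented default
  max_spawn_depth: 2            # documented default
  orchestrator_enabled: true    # documented default
  subagent_auto_approve: false  # illustrative
  child_timeout_seconds: 900    # illustrative
```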
|
||||
|
||||
Synchronicity rule: delegate_task is **not** durable. For long-running
|
||||
work that must outlive the current turn, use `cronjob` or
|
||||
`terminal(background=True, notify_on_complete=True)` instead.
|
||||
|
||||
---
|
||||
|
||||
## Curator (skill lifecycle)
|
||||
|
||||
Background skill-maintenance system that tracks usage on agent-created
|
||||
skills and auto-archives stale ones. Users never lose skills; archives
|
||||
go to `~/.hermes/skills/.archive/` and are restorable.
|
||||
|
||||
- **Core:** `agent/curator.py` (review loop, auto-transitions, LLM review
|
||||
prompt) + `agent/curator_backup.py` (pre-run tar.gz snapshots).
|
||||
- **CLI:** `hermes_cli/curator.py` wires `hermes curator <verb>` where
|
||||
verbs are: `status`, `run`, `pause`, `resume`, `pin`, `unpin`,
|
||||
`archive`, `restore`, `prune`, `backup`, `rollback`.
|
||||
- **Telemetry:** `tools/skill_usage.py` owns the sidecar
|
||||
`~/.hermes/skills/.usage.json` — per-skill `use_count`, `view_count`,
|
||||
`patch_count`, `last_activity_at`, `state` (active / stale /
|
||||
archived), `pinned`.
|
||||
|
||||
Invariants:
|
||||
- Curator only touches skills with `created_by: "agent"` provenance —
|
||||
bundled + hub-installed skills are off-limits.
|
||||
- Never deletes; max destructive action is archive.
|
||||
- Pinned skills are exempt from every auto-transition and from the
|
||||
LLM review pass.
|
||||
- `skill_manage(action="delete")` refuses pinned skills; patch/edit/
|
||||
write_file/remove_file go through so the agent can keep improving
|
||||
pinned skills.
|
||||
|
||||
Config section (`curator:` in `config.yaml`):
|
||||
`enabled`, `interval_hours`, `min_idle_hours`, `stale_after_days`,
|
||||
`archive_after_days`, `backup.*`.
|
||||
|
||||
Full user-facing docs: `website/docs/user-guide/features/curator.md`.
|
||||
|
||||
---
|
||||
|
||||
## Cron (scheduled jobs)
|
||||
|
||||
`cron/jobs.py` (job store) + `cron/scheduler.py` (tick loop). Agents
|
||||
schedule jobs via the `cronjob` tool; users via `hermes cron <verb>`
|
||||
(`list`, `add`, `edit`, `pause`, `resume`, `run`, `remove`) or the
|
||||
`/cron` slash command.
|
||||
|
||||
Supported schedule formats:
|
||||
- Duration: `"30m"`, `"2h"`, `"1d"`
|
||||
- "every" phrase: `"every 2h"`, `"every monday 9am"`
|
||||
- 5-field cron expression: `"0 9 * * *"`
|
||||
- ISO timestamp (one-shot): `"2026-06-01T09:00:00Z"`
|
||||
|
||||
Per-job fields include `skills` (load specific skills), `model` /
|
||||
`provider` overrides, `script` (pre-run data-collection script whose
|
||||
stdout is injected into the prompt; `no_agent=True` turns the script
|
||||
into the entire job), `context_from` (chain job A's last output into
|
||||
job B's prompt), `workdir` (run in a specific directory with its
|
||||
`AGENTS.md`/`CLAUDE.md` loaded), and multi-platform delivery.
|
||||
|
||||
Hardening invariants:
|
||||
- **3-minute hard interrupt** on cron sessions — runaway agent loops
|
||||
cannot monopolize the scheduler.
|
||||
- Catchup window: half the job's period, clamped to 120s–2h.
|
||||
- Grace window: 120s for one-shot jobs whose fire time was missed.
|
||||
- File lock at `~/.hermes/cron/.tick.lock` prevents duplicate ticks
|
||||
across processes.
|
||||
- Cron sessions pass `skip_memory=True` by default; memory providers
|
||||
intentionally do not run during cron.
|
||||
|
||||
Cron deliveries are **not** mirrored into the target gateway session —
|
||||
they land in their own cron session with a header/footer frame so the
|
||||
main conversation's message-role alternation stays intact.
|
||||
|
||||
---
|
||||
|
||||
## Kanban (multi-agent work queue)
|
||||
|
||||
Durable SQLite-backed board that lets multiple profiles / workers
|
||||
collaborate on shared tasks. Users drive it via `hermes kanban <verb>`;
|
||||
workers spawned by the dispatcher drive it via a dedicated `kanban_*`
|
||||
toolset so their schema footprint is zero when they're not inside a
|
||||
kanban task.
|
||||
|
||||
- **CLI:** `hermes_cli/kanban.py` wires `hermes kanban` with verbs
|
||||
`init`, `create`, `list` (alias `ls`), `show`, `assign`, `link`,
|
||||
`unlink`, `comment`, `complete`, `block`, `unblock`, `archive`,
|
||||
`tail`, plus less-commonly-used `watch`, `stats`, `runs`, `log`,
|
||||
`assignees`, `heartbeat`, `notify-*`, `dispatch`, `daemon`, `gc`.
|
||||
- **Worker toolset:** `tools/kanban_tools.py` exposes `kanban_show`,
|
||||
`kanban_complete`, `kanban_block`, `kanban_heartbeat`, `kanban_comment`,
|
||||
`kanban_create`, `kanban_link` — gated by `HERMES_KANBAN_TASK` so
|
||||
the schema only appears for processes actually running as a worker.
|
||||
- **Dispatcher:** long-lived loop that (default every 60s) reclaims
|
||||
stale claims, promotes ready tasks, atomically claims them, and spawns
|
||||
assigned profiles. Runs **inside the gateway** by default via
|
||||
`kanban.dispatch_in_gateway: true`.
|
||||
- **Plugin assets:** `plugins/kanban/dashboard/` (web UI) +
|
||||
`plugins/kanban/systemd/` (`hermes-kanban-dispatcher.service` for
|
||||
standalone dispatcher deployment).
|
||||
|
||||
Isolation model:
|
||||
- **Board** is the hard boundary — workers are spawned with
|
||||
`HERMES_KANBAN_BOARD` pinned in their env so they can't see other
|
||||
boards.
|
||||
- **Tenant** is a soft namespace *within* a board — one specialist
|
||||
fleet can serve multiple businesses with workspace-path + memory-key
|
||||
isolation.
|
||||
- After ~5 consecutive spawn failures on the same task the dispatcher
|
||||
auto-blocks it to prevent spin loops.
|
||||
|
||||
Full user-facing docs: `website/docs/user-guide/features/kanban.md`.
|
||||
|
||||
---
|
||||
|
||||
## Important Policies
|
||||
|
||||
### Prompt Caching Must Not Break
|
||||
|
||||
Hermes-Agent ensures caching remains valid throughout a conversation. **Do NOT implement changes that would:**
|
||||
@@ -775,16 +345,14 @@ Hermes-Agent ensures caching remains valid throughout a conversation. **Do NOT i
|
||||
|
||||
Cache-breaking forces dramatically higher costs. The ONLY time we alter context is during context compression.
|
||||
|
||||
Slash commands that mutate system-prompt state (skills, tools, memory, etc.)
|
||||
must be **cache-aware**: default to deferred invalidation (change takes
|
||||
effect next session), with an opt-in `--now` flag for immediate
|
||||
invalidation. See `/skills install --now` for the canonical pattern.
|
||||
### Working Directory Behavior
|
||||
- **CLI**: Uses current directory (`.` → `os.getcwd()`)
|
||||
- **Messaging**: Uses `MESSAGING_CWD` env var (default: home directory)
|
||||
|
||||
### Background Process Notifications (Gateway)
|
||||
|
||||
When `terminal(background=true, notify_on_complete=true)` is used, the gateway runs a watcher that
|
||||
detects process completion and triggers a new agent turn. Control verbosity of background process
|
||||
messages with `display.background_process_notifications`
|
||||
When `terminal(background=true, check_interval=...)` is used, the gateway runs a watcher that
|
||||
pushes status updates to the user's chat. Control verbosity with `display.background_process_notifications`
|
||||
in config.yaml (or `HERMES_BACKGROUND_NOTIFICATIONS` env var):
|
||||
|
||||
- `all` — running-output updates + final message (default)
|
||||
@@ -800,7 +368,7 @@ Hermes supports **profiles** — multiple fully isolated instances, each with it
|
||||
`HERMES_HOME` directory (config, API keys, memory, sessions, skills, gateway, etc.).
|
||||
|
||||
The core mechanism: `_apply_profile_override()` in `hermes_cli/main.py` sets
|
||||
`HERMES_HOME` before any module imports. All `get_hermes_home()` references
|
||||
`HERMES_HOME` before any module imports. All 119+ references to `get_hermes_home()`
|
||||
automatically scope to the active profile.
|
||||
|
||||
### Rules for profile-safe code
|
||||
@@ -857,12 +425,8 @@ Use `get_hermes_home()` from `hermes_constants` for code paths. Use `display_her
|
||||
for user-facing print/log messages. Hardcoding `~/.hermes` breaks profiles — each profile
|
||||
has its own `HERMES_HOME` directory. This was the source of 5 bugs fixed in PR #3575.
|
||||
|
||||
### DO NOT introduce new `simple_term_menu` usage
|
||||
Existing call sites in `hermes_cli/main.py` remain for legacy fallback only;
|
||||
the preferred UI is curses (stdlib) because `simple_term_menu` has
|
||||
ghost-duplication rendering bugs in tmux/iTerm2 with arrow keys. New
|
||||
interactive menus must use `hermes_cli/curses_ui.py` — see
|
||||
`hermes_cli/tools_config.py` for the canonical pattern.
|
||||
### DO NOT use `simple_term_menu` for interactive menus
|
||||
Rendering bugs in tmux/iTerm2 — ghosting on scroll. Use `curses` (stdlib) instead. See `hermes_cli/tools_config.py` for the pattern.
|
||||
|
||||
### DO NOT use `\033[K` (ANSI erase-to-EOL) in spinner/display code
|
||||
Leaks as literal `?[K` text under `prompt_toolkit`'s `patch_stdout`. Use space-padding: `f"\r{line}{' ' * pad}"`.
|
||||
@@ -873,30 +437,6 @@ Leaks as literal `?[K` text under `prompt_toolkit`'s `patch_stdout`. Use space-p
|
||||
### DO NOT hardcode cross-tool references in schema descriptions
|
||||
Tool schema descriptions must not mention tools from other toolsets by name (e.g., `browser_navigate` saying "prefer web_search"). Those tools may be unavailable (missing API keys, disabled toolset), causing the model to hallucinate calls to non-existent tools. If a cross-reference is needed, add it dynamically in `get_tool_definitions()` in `model_tools.py` — see the `browser_navigate` / `execute_code` post-processing blocks for the pattern.
|
||||
|
||||
### The gateway has TWO message guards — both must bypass approval/control commands
|
||||
When an agent is running, messages pass through two sequential guards:
|
||||
(1) **base adapter** (`gateway/platforms/base.py`) queues messages in
|
||||
`_pending_messages` when `session_key in self._active_sessions`, and
|
||||
(2) **gateway runner** (`gateway/run.py`) intercepts `/stop`, `/new`,
|
||||
`/queue`, `/status`, `/approve`, `/deny` before they reach
|
||||
`running_agent.interrupt()`. Any new command that must reach the runner
|
||||
while the agent is blocked (e.g. approval prompts) MUST bypass BOTH
|
||||
guards and be dispatched inline, not via `_process_message_background()`
|
||||
(which races session lifecycle).
|
||||
|
||||
### Squash merges from stale branches silently revert recent fixes
|
||||
Before squash-merging a PR, ensure the branch is up to date with `main`
|
||||
(`git fetch origin main && git reset --hard origin/main` in the worktree,
|
||||
then re-apply the PR's commits). A stale branch's version of an unrelated
|
||||
file will silently overwrite recent fixes on main when squashed. Verify
|
||||
with `git diff HEAD~1..HEAD` after merging — unexpected deletions are a
|
||||
red flag.
|
||||
|
||||
### Don't wire in dead code without E2E validation
|
||||
Unused code that was never shipped was dead for a reason. Before wiring an
|
||||
unused module into a live code path, E2E test the real resolution chain
|
||||
with actual imports (not mocks) against a temp `HERMES_HOME`.
|
||||
|
||||
### Tests must not write to `~/.hermes/`
|
||||
The `_isolate_hermes_home` autouse fixture in `tests/conftest.py` redirects `HERMES_HOME` to a temp dir. Never hardcode `~/.hermes/` paths in tests.
|
||||
|
||||
@@ -917,94 +457,13 @@ def profile_env(tmp_path, monkeypatch):
|
||||
|
||||
## Testing
|
||||
|
||||
**ALWAYS use `scripts/run_tests.sh`** — do not call `pytest` directly. The script enforces
|
||||
hermetic environment parity with CI (unset credential vars, TZ=UTC, LANG=C.UTF-8,
|
||||
4 xdist workers matching GHA ubuntu-latest). Direct `pytest` on a 16+ core
|
||||
developer machine with API keys set diverges from CI in ways that have caused
|
||||
multiple "works locally, fails in CI" incidents (and the reverse).
|
||||
|
||||
```bash
|
||||
scripts/run_tests.sh # full suite, CI-parity
|
||||
scripts/run_tests.sh tests/gateway/ # one directory
|
||||
scripts/run_tests.sh tests/agent/test_foo.py::test_x # one test
|
||||
scripts/run_tests.sh -v --tb=long # pass-through pytest flags
|
||||
source venv/bin/activate
|
||||
python -m pytest tests/ -q # Full suite (~3000 tests, ~3 min)
|
||||
python -m pytest tests/test_model_tools.py -q # Toolset resolution
|
||||
python -m pytest tests/test_cli_init.py -q # CLI config loading
|
||||
python -m pytest tests/gateway/ -q # Gateway tests
|
||||
python -m pytest tests/tools/ -q # Tool-level tests
|
||||
```
|
||||
|
||||
### Why the wrapper (and why the old "just call pytest" doesn't work)
|
||||
|
||||
Five real sources of local-vs-CI drift the script closes:
|
||||
|
||||
| | Without wrapper | With wrapper |
|
||||
|---|---|---|
|
||||
| Provider API keys | Whatever is in your env (auto-detects pool) | All `*_API_KEY`/`*_TOKEN`/etc. unset |
|
||||
| HOME / `~/.hermes/` | Your real config+auth.json | Temp dir per test |
|
||||
| Timezone | Local TZ (PDT etc.) | UTC |
|
||||
| Locale | Whatever is set | C.UTF-8 |
|
||||
| xdist workers | `-n auto` = all cores (20+ on a workstation) | `-n 4` matching CI |
|
||||
|
||||
`tests/conftest.py` also enforces the first four of those (everything except the worker count) as an autouse fixture so ANY pytest
|
||||
invocation (including IDE integrations) gets hermetic behavior — but the wrapper
|
||||
is belt-and-suspenders.
|
||||
|
||||
### Running without the wrapper (only if you must)
|
||||
|
||||
If you can't use the wrapper (e.g. on Windows or inside an IDE that shells
|
||||
pytest directly), at minimum activate the venv and pass `-n 4`:
|
||||
|
||||
```bash
|
||||
source .venv/bin/activate # or: source venv/bin/activate
|
||||
python -m pytest tests/ -q -n 4
|
||||
```
|
||||
|
||||
Worker count above 4 will surface test-ordering flakes that CI never sees.
|
||||
|
||||
Always run the full suite before pushing changes.
|
||||
|
||||
### Don't write change-detector tests
|
||||
|
||||
A test is a **change-detector** if it fails whenever data that is **expected
|
||||
to change** gets updated — model catalogs, config version numbers,
|
||||
enumeration counts, hardcoded lists of provider models. These tests add no
|
||||
behavioral coverage; they just guarantee that routine source updates break
|
||||
CI and cost engineering time to "fix."
|
||||
|
||||
**Do not write:**
|
||||
|
||||
```python
|
||||
# catalog snapshot — breaks every model release
|
||||
assert "gemini-2.5-pro" in _PROVIDER_MODELS["gemini"]
|
||||
assert "MiniMax-M2.7" in models
|
||||
|
||||
# config version literal — breaks every schema bump
|
||||
assert DEFAULT_CONFIG["_config_version"] == 21
|
||||
|
||||
# enumeration count — breaks every time a skill/provider is added
|
||||
assert len(_PROVIDER_MODELS["huggingface"]) == 8
|
||||
```
|
||||
|
||||
**Do write:**
|
||||
|
||||
```python
|
||||
# behavior: does the catalog plumbing work at all?
|
||||
assert "gemini" in _PROVIDER_MODELS
|
||||
assert len(_PROVIDER_MODELS["gemini"]) >= 1
|
||||
|
||||
# behavior: does migration bump the user's version to current latest?
|
||||
assert raw["_config_version"] == DEFAULT_CONFIG["_config_version"]
|
||||
|
||||
# invariant: no plan-only model leaks into the legacy list
|
||||
assert not (set(moonshot_models) & coding_plan_only_models)
|
||||
|
||||
# invariant: every model in the catalog has a context-length entry
|
||||
for m in _PROVIDER_MODELS["huggingface"]:
|
||||
assert m.lower() in DEFAULT_CONTEXT_LENGTHS_LOWER
|
||||
```
|
||||
|
||||
The rule: if the test reads like a snapshot of current data, delete it. If
|
||||
it reads like a contract about how two pieces of data must relate, keep it.
|
||||
When a PR adds a new provider/model and you want a test, make the test
|
||||
assert the relationship (e.g. "catalog entries all have context lengths"),
|
||||
not the specific names.
|
||||
|
||||
Reviewers should reject new change-detector tests; authors should convert
|
||||
them into invariants before re-requesting review.
|
||||
|
||||
@@ -9,7 +9,7 @@ Thank you for contributing to Hermes Agent! This guide covers everything you nee
|
||||
We value contributions in this order:
|
||||
|
||||
1. **Bug fixes** — crashes, incorrect behavior, data loss. Always top priority.
|
||||
2. **Cross-platform compatibility** — macOS, different Linux distros, and WSL2 on Windows. We want Hermes to work everywhere.
|
||||
2. **Cross-platform compatibility** — Windows, macOS, different Linux distros, different terminal emulators. We want Hermes to work everywhere.
|
||||
3. **Security hardening** — shell injection, prompt injection, path traversal, privilege escalation. See [Security](#security-considerations).
|
||||
4. **Performance and robustness** — retry logic, error handling, graceful degradation.
|
||||
5. **New skills** — but only broadly useful ones. See [Should it be a Skill or a Tool?](#should-it-be-a-skill-or-a-tool)
|
||||
@@ -55,10 +55,10 @@ If your skill is specialized, community-contributed, or niche, it's better suite
|
||||
|
||||
| Requirement | Notes |
|
||||
|-------------|-------|
|
||||
| **Git** | With `--recurse-submodules` support, and the `git-lfs` extension installed |
|
||||
| **Git** | With `--recurse-submodules` support |
|
||||
| **Python 3.11+** | uv will install it if missing |
|
||||
| **uv** | Fast Python package manager ([install](https://docs.astral.sh/uv/)) |
|
||||
| **Node.js 20+** | Optional — needed for browser tools and WhatsApp bridge (matches root `package.json` engines) |
|
||||
| **Node.js 18+** | Optional — needed for browser tools and WhatsApp bridge |
|
||||
|
||||
### Clone and install
|
||||
|
||||
@@ -88,7 +88,7 @@ cp cli-config.yaml.example ~/.hermes/config.yaml
|
||||
touch ~/.hermes/.env
|
||||
|
||||
# Add at minimum an LLM provider key:
|
||||
echo "OPENROUTER_API_KEY=***" >> ~/.hermes/.env
|
||||
echo 'OPENROUTER_API_KEY=sk-or-v1-your-key' >> ~/.hermes/.env
|
||||
```
|
||||
|
||||
### Run
|
||||
@@ -106,11 +106,6 @@ hermes chat -q "Hello"
|
||||
### Run tests
|
||||
|
||||
```bash
|
||||
# Preferred — matches CI (hermetic env, 4 xdist workers); see AGENTS.md
|
||||
scripts/run_tests.sh
|
||||
|
||||
# Alternative (activate the venv first). The wrapper is still recommended
|
||||
# for parity with GitHub Actions before you open a PR:
|
||||
pytest tests/ -v
|
||||
```
|
||||
|
||||
@@ -291,18 +286,16 @@ registry.register(
|
||||
)
|
||||
```
|
||||
|
||||
**Wire into a toolset (required):** Built-in tools are auto-discovered: any
|
||||
`tools/*.py` file that contains a top-level `registry.register(...)` call is
|
||||
imported by `discover_builtin_tools()` in `tools/registry.py` when `model_tools`
|
||||
loads. There is **no** manual import list in `model_tools.py` to maintain.
|
||||
Then add the import to `model_tools.py` in the `_modules` list:
|
||||
|
||||
You must still add the tool name to the appropriate list in `toolsets.py`
|
||||
(for example `_HERMES_CORE_TOOLS` or a dedicated toolset); otherwise the tool
|
||||
registers but is never exposed to the agent. If you introduce a new toolset,
|
||||
add it in `toolsets.py` and wire it into the relevant platform presets.
|
||||
```python
|
||||
_modules = [
|
||||
# ... existing modules ...
|
||||
"tools.my_tool",
|
||||
]
|
||||
```
|
||||
|
||||
See `AGENTS.md` (section **Adding New Tools**) for profile-aware paths and
|
||||
plugin vs core guidance.
|
||||
If it's a new toolset, add it to `toolsets.py` and to the relevant platform presets.
|
||||
|
||||
---
|
||||
|
||||
@@ -501,7 +494,7 @@ branding:
|
||||
agent_name: "My Agent"
|
||||
welcome: "Welcome message"
|
||||
response_label: " ⚔ Agent "
|
||||
prompt_symbol: "⚔"
|
||||
prompt_symbol: "⚔ ❯ "
|
||||
|
||||
tool_prefix: "╎" # Tool output line prefix
|
||||
```
|
||||
@@ -522,7 +515,7 @@ See `hermes_cli/skin_engine.py` for the full schema and existing skins as exampl
|
||||
|
||||
## Cross-Platform Compatibility
|
||||
|
||||
Hermes runs on Linux, macOS, and WSL2 on Windows. When writing code that touches the OS:
|
||||
Hermes runs on Linux, macOS, and Windows. When writing code that touches the OS:
|
||||
|
||||
### Critical rules
|
||||
|
||||
@@ -602,9 +595,9 @@ refactor/description # Code restructuring
|
||||
|
||||
### Before submitting
|
||||
|
||||
1. **Run tests**: `scripts/run_tests.sh` (recommended; same as CI) or `pytest tests/ -v` with the project venv activated
|
||||
1. **Run tests**: `pytest tests/ -v`
|
||||
2. **Test manually**: Run `hermes` and exercise the code path you changed
|
||||
3. **Check cross-platform impact**: If you touch file I/O, process management, or terminal handling, consider macOS, Linux, and WSL2
|
||||
3. **Check cross-platform impact**: If you touch file I/O, process management, or terminal handling, consider Windows and macOS
|
||||
4. **Keep PRs focused**: One logical change per PR. Don't mix a bug fix with a refactor with a new feature.
|
||||
|
||||
### PR description
|
||||
|
||||
Dockerfile
@@ -1,89 +1,25 @@
|
||||
FROM ghcr.io/astral-sh/uv:0.11.6-python3.13-trixie@sha256:b3c543b6c4f23a5f2df22866bd7857e5d304b67a564f4feab6ac22044dde719b AS uv_source
|
||||
FROM tianon/gosu:1.19-trixie@sha256:3b176695959c71e123eb390d427efc665eeb561b1540e82679c15e992006b8b9 AS gosu_source
|
||||
FROM debian:13.4
|
||||
|
||||
# Disable Python stdout buffering to ensure logs are printed immediately
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
|
||||
# Store Playwright browsers outside the volume mount so the build-time
|
||||
# install survives the /opt/data volume overlay at runtime.
|
||||
ENV PLAYWRIGHT_BROWSERS_PATH=/opt/hermes/.playwright
|
||||
|
||||
# Install system dependencies in one layer, clear APT cache
|
||||
# tini reaps orphaned zombie processes (MCP stdio subprocesses, git, bun, etc.)
|
||||
# that would otherwise accumulate when hermes runs as PID 1. See #15012.
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
build-essential curl nodejs npm python3 ripgrep ffmpeg gcc python3-dev libffi-dev procps git openssh-client docker-cli tini && \
|
||||
build-essential nodejs npm python3 python3-pip ripgrep ffmpeg gcc python3-dev libffi-dev && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Non-root user for runtime; UID can be overridden via HERMES_UID at runtime
|
||||
RUN useradd -u 10000 -m -d /opt/data hermes
|
||||
|
||||
COPY --chmod=0755 --from=gosu_source /gosu /usr/local/bin/
|
||||
COPY --chmod=0755 --from=uv_source /usr/local/bin/uv /usr/local/bin/uvx /usr/local/bin/
|
||||
|
||||
COPY . /opt/hermes
|
||||
WORKDIR /opt/hermes
|
||||
|
||||
# ---------- Layer-cached dependency install ----------
|
||||
# Copy only package manifests first so npm install + Playwright are cached
|
||||
# unless the lockfiles themselves change.
|
||||
#
|
||||
# ui-tui/packages/hermes-ink/ is copied IN FULL (not just its manifests)
|
||||
# because it is referenced as a `file:` workspace dependency from
|
||||
# ui-tui/package.json. Copying the tree up front lets npm resolve the
|
||||
# workspace to real content instead of stopping at a bare package.json.
|
||||
COPY package.json package-lock.json ./
|
||||
COPY web/package.json web/package-lock.json web/
|
||||
COPY ui-tui/package.json ui-tui/package-lock.json ui-tui/
|
||||
COPY ui-tui/packages/hermes-ink/ ui-tui/packages/hermes-ink/
|
||||
|
||||
# `npm_config_install_links=false` forces npm to install `file:` deps as
|
||||
# symlinks (the npm 10+ default) even on Debian's older bundled npm 9.x,
|
||||
# which defaults to `install-links=true` and installs file deps as *copies*.
|
||||
# The host-side package-lock.json is generated with a newer npm that uses
|
||||
# symlinks, so an install-as-copy produces a hidden node_modules/.package-lock.json
|
||||
# that permanently disagrees with the root lock on the @hermes/ink entry.
|
||||
# That disagreement trips the TUI launcher's `_tui_need_npm_install()`
|
||||
# check on every startup and triggers a runtime `npm install` that then
|
||||
# fails with EACCES (node_modules/ is root-owned from build time).
|
||||
ENV npm_config_install_links=false
|
||||
|
||||
RUN npm install --prefer-offline --no-audit && \
|
||||
# Install Python and Node dependencies in one layer, no cache
|
||||
RUN pip install --no-cache-dir -e ".[all]" --break-system-packages && \
|
||||
npm install --prefer-offline --no-audit && \
|
||||
npx playwright install --with-deps chromium --only-shell && \
|
||||
(cd web && npm install --prefer-offline --no-audit) && \
|
||||
(cd ui-tui && npm install --prefer-offline --no-audit) && \
|
||||
cd /opt/hermes/scripts/whatsapp-bridge && \
|
||||
npm install --prefer-offline --no-audit && \
|
||||
npm cache clean --force
|
||||
|
||||
# ---------- Source code ----------
|
||||
# .dockerignore excludes node_modules, so the installs above survive.
|
||||
COPY --chown=hermes:hermes . .
|
||||
WORKDIR /opt/hermes
|
||||
RUN chmod +x /opt/hermes/docker/entrypoint.sh
|
||||
|
||||
# Build browser dashboard and terminal UI assets.
|
||||
RUN cd web && npm run build && \
|
||||
cd ../ui-tui && npm run build
|
||||
|
||||
# ---------- Permissions ----------
|
||||
# Make install dir world-readable so any HERMES_UID can read it at runtime.
|
||||
# The venv needs to be traversable too.
|
||||
# node_modules trees additionally need to be writable by the hermes user
|
||||
# so the runtime `npm install` triggered by _tui_need_npm_install() in
|
||||
# hermes_cli/main.py succeeds (see #18800). /opt/hermes/web is build-time
|
||||
# only (HERMES_WEB_DIST points at hermes_cli/web_dist) and is intentionally
|
||||
# not chowned here.
|
||||
USER root
|
||||
RUN chmod -R a+rX /opt/hermes && \
|
||||
chown -R hermes:hermes /opt/hermes/ui-tui /opt/hermes/node_modules
|
||||
# Start as root so the entrypoint can usermod/groupmod + gosu.
|
||||
# If HERMES_UID is unset, the entrypoint drops to the default hermes user (10000).
|
||||
|
||||
# ---------- Python virtualenv ----------
|
||||
RUN uv venv && \
|
||||
uv pip install --no-cache-dir -e ".[all]"
|
||||
|
||||
# ---------- Runtime ----------
|
||||
ENV HERMES_WEB_DIST=/opt/hermes/hermes_cli/web_dist
|
||||
ENV HERMES_HOME=/opt/data
|
||||
ENV PATH="/opt/data/.local/bin:${PATH}"
|
||||
VOLUME [ "/opt/data" ]
|
||||
ENTRYPOINT [ "/usr/bin/tini", "-g", "--", "/opt/hermes/docker/entrypoint.sh" ]
|
||||
ENTRYPOINT [ "/opt/hermes/docker/entrypoint.sh" ]
|
||||
|
||||
README.md
@@ -9,12 +9,11 @@
|
||||
<a href="https://discord.gg/NousResearch"><img src="https://img.shields.io/badge/Discord-5865F2?style=for-the-badge&logo=discord&logoColor=white" alt="Discord"></a>
|
||||
<a href="https://github.com/NousResearch/hermes-agent/blob/main/LICENSE"><img src="https://img.shields.io/badge/License-MIT-green?style=for-the-badge" alt="License: MIT"></a>
|
||||
<a href="https://nousresearch.com"><img src="https://img.shields.io/badge/Built%20by-Nous%20Research-blueviolet?style=for-the-badge" alt="Built by Nous Research"></a>
|
||||
<a href="README.zh-CN.md"><img src="https://img.shields.io/badge/Lang-中文-red?style=for-the-badge" alt="中文"></a>
|
||||
</p>
|
||||
|
||||
**The self-improving AI agent built by [Nous Research](https://nousresearch.com).** It's the only agent with a built-in learning loop — it creates skills from experience, improves them during use, nudges itself to persist knowledge, searches its own past conversations, and builds a deepening model of who you are across sessions. Run it on a $5 VPS, a GPU cluster, or serverless infrastructure that costs nearly nothing when idle. It's not tied to your laptop — talk to it from Telegram while it works on a cloud VM.
|
||||
|
||||
Use any model you want — [Nous Portal](https://portal.nousresearch.com), [OpenRouter](https://openrouter.ai) (200+ models), [NVIDIA NIM](https://build.nvidia.com) (Nemotron), [Xiaomi MiMo](https://platform.xiaomimimo.com), [z.ai/GLM](https://z.ai), [Kimi/Moonshot](https://platform.moonshot.ai), [MiniMax](https://www.minimax.io), [Hugging Face](https://huggingface.co), OpenAI, or your own endpoint. Switch with `hermes model` — no code changes, no lock-in.
|
||||
Use any model you want — [Nous Portal](https://portal.nousresearch.com), [OpenRouter](https://openrouter.ai) (200+ models), [z.ai/GLM](https://z.ai), [Kimi/Moonshot](https://platform.moonshot.ai), [MiniMax](https://www.minimax.io), OpenAI, or your own endpoint. Switch with `hermes model` — no code changes, no lock-in.
|
||||
|
||||
<table>
|
||||
<tr><td><b>A real terminal interface</b></td><td>Full TUI with multiline editing, slash-command autocomplete, conversation history, interrupt-and-redirect, and streaming tool output.</td></tr>
|
||||
@@ -22,7 +21,7 @@ Use any model you want — [Nous Portal](https://portal.nousresearch.com), [Open
|
||||
<tr><td><b>A closed learning loop</b></td><td>Agent-curated memory with periodic nudges. Autonomous skill creation after complex tasks. Skills self-improve during use. FTS5 session search with LLM summarization for cross-session recall. <a href="https://github.com/plastic-labs/honcho">Honcho</a> dialectic user modeling. Compatible with the <a href="https://agentskills.io">agentskills.io</a> open standard.</td></tr>
|
||||
<tr><td><b>Scheduled automations</b></td><td>Built-in cron scheduler with delivery to any platform. Daily reports, nightly backups, weekly audits — all in natural language, running unattended.</td></tr>
|
||||
<tr><td><b>Delegates and parallelizes</b></td><td>Spawn isolated subagents for parallel workstreams. Write Python scripts that call tools via RPC, collapsing multi-step pipelines into zero-context-cost turns.</td></tr>
|
||||
<tr><td><b>Runs anywhere, not just your laptop</b></td><td>Seven terminal backends — local, Docker, SSH, Singularity, Modal, Daytona, and Vercel Sandbox. Daytona and Modal offer serverless persistence — your agent's environment hibernates when idle and wakes on demand, costing nearly nothing between sessions. Run it on a $5 VPS or a GPU cluster.</td></tr>
|
||||
<tr><td><b>Runs anywhere, not just your laptop</b></td><td>Six terminal backends — local, Docker, SSH, Daytona, Singularity, and Modal. Daytona and Modal offer serverless persistence — your agent's environment hibernates when idle and wakes on demand, costing nearly nothing between sessions. Run it on a $5 VPS or a GPU cluster.</td></tr>
|
||||
<tr><td><b>Research-ready</b></td><td>Batch trajectory generation, Atropos RL environments, trajectory compression for training the next generation of tool-calling models.</td></tr>
|
||||
</table>
|
||||
|
||||
@@ -34,10 +33,8 @@ Use any model you want — [Nous Portal](https://portal.nousresearch.com), [Open
|
||||
curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.sh | bash
|
||||
```
|
||||
|
||||
Works on Linux, macOS, WSL2, and Android via Termux. The installer handles the platform-specific setup for you.
|
||||
Works on Linux, macOS, and WSL2. The installer handles everything — Python, Node.js, dependencies, and the `hermes` command. No prerequisites except git.
|
||||
|
||||
> **Android / Termux:** The tested manual path is documented in the [Termux guide](https://hermes-agent.nousresearch.com/docs/getting-started/termux). On Termux, Hermes installs a curated `.[termux]` extra because the full `.[all]` extra currently pulls Android-incompatible voice dependencies.
|
||||
>
|
||||
> **Windows:** Native Windows is not supported. Please install [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install) and run the command above.
|
||||
|
||||
After installation:
|
||||
@@ -77,7 +74,7 @@ Hermes has two entry points: start the terminal UI with `hermes`, or run the gat
|
||||
| Set a personality | `/personality [name]` | `/personality [name]` |
|
||||
| Retry or undo the last turn | `/retry`, `/undo` | `/retry`, `/undo` |
|
||||
| Compress context / check usage | `/compress`, `/usage`, `/insights [--days N]` | `/compress`, `/usage`, `/insights [days]` |
|
||||
| Browse skills | `/skills` or `/<skill-name>` | `/<skill-name>` |
|
||||
| Browse skills | `/skills` or `/<skill-name>` | `/skills` or `/<skill-name>` |
|
||||
| Interrupt current work | `Ctrl+C` or send a new message | `/stop` or send a new message |
|
||||
| Platform-specific status | `/platforms` | `/status`, `/sethome` |
|
||||
|
||||
@@ -142,26 +139,23 @@ See `hermes claw migrate --help` for all options, or use the `openclaw-migration
|
||||
|
||||
We welcome contributions! See the [Contributing Guide](https://hermes-agent.nousresearch.com/docs/developer-guide/contributing) for development setup, code style, and PR process.
|
||||
|
||||
Quick start for contributors — clone and go with `setup-hermes.sh`:
|
||||
Quick start for contributors:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/NousResearch/hermes-agent.git
|
||||
cd hermes-agent
|
||||
./setup-hermes.sh # installs uv, creates venv, installs .[all], symlinks ~/.local/bin/hermes
|
||||
./hermes # auto-detects the venv, no need to `source` first
|
||||
```
|
||||
|
||||
Manual path (equivalent to the above):
|
||||
|
||||
```bash
|
||||
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
uv venv .venv --python 3.11
|
||||
source .venv/bin/activate
|
||||
uv venv venv --python 3.11
|
||||
source venv/bin/activate
|
||||
uv pip install -e ".[all,dev]"
|
||||
scripts/run_tests.sh
|
||||
python -m pytest tests/ -q
|
||||
```
|
||||
|
||||
> **RL Training (optional):** The RL/Atropos integration (`environments/`) — see [`CONTRIBUTING.md`](https://github.com/NousResearch/hermes-agent/blob/main/CONTRIBUTING.md#development-setup) for the full setup.
|
||||
> **RL Training (optional):** To work on the RL/Tinker-Atropos integration:
|
||||
> ```bash
|
||||
> git submodule update --init tinker-atropos
|
||||
> uv pip install -e "./tinker-atropos"
|
||||
> ```
|
||||
|
||||
---
|
||||
|
||||
@@ -170,7 +164,7 @@ scripts/run_tests.sh
|
||||
- 💬 [Discord](https://discord.gg/NousResearch)
|
||||
- 📚 [Skills Hub](https://agentskills.io)
|
||||
- 🐛 [Issues](https://github.com/NousResearch/hermes-agent/issues)
|
||||
- 🔌 [HermesClaw](https://github.com/AaronWong1999/hermesclaw) — Community WeChat bridge: Run Hermes Agent and OpenClaw on the same WeChat account.
|
||||
- 💡 [Discussions](https://github.com/NousResearch/hermes-agent/discussions)
|
||||
|
||||
---
|
||||
|
||||
|
||||
README.zh-CN.md
@@ -1,186 +0,0 @@
|
||||
<p align="center">
|
||||
<img src="assets/banner.png" alt="Hermes Agent" width="100%">
|
||||
</p>
|
||||
|
||||
# Hermes Agent ☤
|
||||
|
||||
<p align="center">
|
||||
<a href="https://hermes-agent.nousresearch.com/docs/"><img src="https://img.shields.io/badge/Docs-hermes--agent.nousresearch.com-FFD700?style=for-the-badge" alt="Documentation"></a>
|
||||
<a href="https://discord.gg/NousResearch"><img src="https://img.shields.io/badge/Discord-5865F2?style=for-the-badge&logo=discord&logoColor=white" alt="Discord"></a>
|
||||
<a href="https://github.com/NousResearch/hermes-agent/blob/main/LICENSE"><img src="https://img.shields.io/badge/License-MIT-green?style=for-the-badge" alt="License: MIT"></a>
|
||||
<a href="https://nousresearch.com"><img src="https://img.shields.io/badge/Built%20by-Nous%20Research-blueviolet?style=for-the-badge" alt="Built by Nous Research"></a>
|
||||
<a href="README.md"><img src="https://img.shields.io/badge/Lang-English-lightgrey?style=for-the-badge" alt="English"></a>
|
||||
</p>
|
||||
|
||||
**由 [Nous Research](https://nousresearch.com) 构建的自进化 AI 代理。** 它是唯一内置学习闭环的智能代理——从经验中创建技能,在使用中改进技能,主动持久化知识,搜索过往对话,并在跨会话中逐步构建对你的深度理解。可以在 $5 的 VPS 上运行,也可以在 GPU 集群上运行,或者使用几乎零成本的 Serverless 基础设施。它不绑定你的笔记本——你可以在 Telegram 上与它对话,而它在云端 VM 上工作。
|
||||
|
||||
支持任意模型——[Nous Portal](https://portal.nousresearch.com)、[OpenRouter](https://openrouter.ai)(200+ 模型)、[NVIDIA NIM](https://build.nvidia.com)(Nemotron)、[小米 MiMo](https://platform.xiaomimimo.com)、[z.ai/GLM](https://z.ai)、[Kimi/Moonshot](https://platform.moonshot.ai)、[MiniMax](https://www.minimax.io)、[Hugging Face](https://huggingface.co)、OpenAI,或自定义端点。使用 `hermes model` 即可切换——无需改代码,无锁定。
|
||||
|
||||
<table>
|
||||
<tr><td><b>真正的终端界面</b></td><td>完整的 TUI,支持多行编辑、斜杠命令自动补全、对话历史、中断重定向和流式工具输出。</td></tr>
|
||||
<tr><td><b>随你所在</b></td><td>Telegram、Discord、Slack、WhatsApp、Signal 和 CLI——全部从单个网关进程运行。语音备忘录转写、跨平台对话连续性。</td></tr>
|
||||
<tr><td><b>闭环学习</b></td><td>代理管理记忆并定期自我提醒。复杂任务后自动创建技能。技能在使用中自我改进。FTS5 会话搜索配合 LLM 摘要实现跨会话回溯。<a href="https://github.com/plastic-labs/honcho">Honcho</a> 辩证式用户建模。兼容 <a href="https://agentskills.io">agentskills.io</a> 开放标准。</td></tr>
|
||||
<tr><td><b>定时自动化</b></td><td>内置 cron 调度器,支持向任何平台投递。日报、夜间备份、周审计——全部用自然语言描述,无人值守运行。</td></tr>
|
||||
<tr><td><b>委派与并行</b></td><td>生成隔离子代理处理并行工作流。编写 Python 脚本通过 RPC 调用工具,将多步管道压缩为零上下文开销的轮次。</td></tr>
|
||||
<tr><td><b>随处运行</b></td><td>六种终端后端——本地、Docker、SSH、Daytona、Singularity 和 Modal。Daytona 和 Modal 提供 Serverless 持久化——代理环境空闲时休眠、按需唤醒,空闲期间几乎零成本。$5 VPS 或 GPU 集群都能跑。</td></tr>
|
||||
<tr><td><b>研究就绪</b></td><td>批量轨迹生成、Atropos RL 环境、轨迹压缩——用于训练下一代工具调用模型。</td></tr>
|
||||
</table>
|
||||
|
||||
---
|
||||
|
||||
## 快速安装
|
||||
|
||||
```bash
|
||||
curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.sh | bash
|
||||
```
|
||||
|
||||
支持 Linux、macOS、WSL2 和 Android (Termux)。安装程序会自动处理平台特定的配置。
|
||||
|
||||
> **Android / Termux:** 已测试的手动安装路径请参考 [Termux 指南](https://hermes-agent.nousresearch.com/docs/getting-started/termux)。在 Termux 上,Hermes 会安装精选的 `.[termux]` 扩展,因为完整的 `.[all]` 扩展会拉取 Android 不兼容的语音依赖。
|
||||
>
|
||||
> **Windows:** 原生 Windows 不受支持。请安装 [WSL2](https://learn.microsoft.com/zh-cn/windows/wsl/install) 并运行上述命令。
|
||||
|
||||
安装后:
|
||||
|
||||
```bash
|
||||
source ~/.bashrc # 重新加载 shell(或: source ~/.zshrc)
|
||||
hermes # 开始对话!
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 快速入门
|
||||
|
||||
```bash
|
||||
hermes # 交互式 CLI — 开始对话
|
||||
hermes model # 选择 LLM 提供商和模型
|
||||
hermes tools # 配置启用的工具
|
||||
hermes config set # 设置单个配置项
|
||||
hermes gateway # 启动消息网关(Telegram、Discord 等)
|
||||
hermes setup # 运行完整设置向导(一次性配置所有内容)
|
||||
hermes claw migrate # 从 OpenClaw 迁移(如果来自 OpenClaw)
|
||||
hermes update # 更新到最新版本
|
||||
hermes doctor # 诊断问题
|
||||
```
|
||||
|
||||
📖 **[完整文档 →](https://hermes-agent.nousresearch.com/docs/)**
|
||||
|
||||
## CLI 与消息平台 快速对照
|
||||
|
||||
Hermes 有两种入口:用 `hermes` 启动终端 UI,或运行网关从 Telegram、Discord、Slack、WhatsApp、Signal 或 Email 与之对话。进入对话后,许多斜杠命令在两种界面中通用。
|
||||
|
||||
| 操作 | CLI | 消息平台 |
|
||||
|------|-----|----------|
|
||||
| 开始对话 | `hermes` | 运行 `hermes gateway setup` + `hermes gateway start`,然后给机器人发消息 |
|
||||
| 开始新对话 | `/new` 或 `/reset` | `/new` 或 `/reset` |
|
||||
| 更换模型 | `/model [provider:model]` | `/model [provider:model]` |
|
||||
| 设置人格 | `/personality [name]` | `/personality [name]` |
|
||||
| 重试或撤销上一轮 | `/retry`、`/undo` | `/retry`、`/undo` |
|
||||
| 压缩上下文 / 查看用量 | `/compress`、`/usage`、`/insights [--days N]` | `/compress`、`/usage`、`/insights [days]` |
|
||||
| 浏览技能 | `/skills` 或 `/<skill-name>` | `/skills` 或 `/<skill-name>` |
|
||||
| 中断当前工作 | `Ctrl+C` 或发送新消息 | `/stop` 或发送新消息 |
|
||||
| 平台特定状态 | `/platforms` | `/status`、`/sethome` |
|
||||
|
||||
完整命令列表请参阅 [CLI 指南](https://hermes-agent.nousresearch.com/docs/user-guide/cli) 和 [消息网关指南](https://hermes-agent.nousresearch.com/docs/user-guide/messaging)。
|
||||
|
||||
---
|
||||
|
||||
## 文档
|
||||
|
||||
所有文档位于 **[hermes-agent.nousresearch.com/docs](https://hermes-agent.nousresearch.com/docs/)**:
|
||||
|
||||
| 章节 | 内容 |
|
||||
|------|------|
|
||||
| [快速开始](https://hermes-agent.nousresearch.com/docs/getting-started/quickstart) | 安装 → 设置 → 2 分钟内开始首次对话 |
|
||||
| [CLI 使用](https://hermes-agent.nousresearch.com/docs/user-guide/cli) | 命令、快捷键、人格、会话 |
|
||||
| [配置](https://hermes-agent.nousresearch.com/docs/user-guide/configuration) | 配置文件、提供商、模型、所有选项 |
|
||||
| [消息网关](https://hermes-agent.nousresearch.com/docs/user-guide/messaging) | Telegram、Discord、Slack、WhatsApp、Signal、Home Assistant |
|
||||
| [安全](https://hermes-agent.nousresearch.com/docs/user-guide/security) | 命令审批、DM 配对、容器隔离 |
|
||||
| [工具与工具集](https://hermes-agent.nousresearch.com/docs/user-guide/features/tools) | 40+ 工具、工具集系统、终端后端 |
|
||||
| [技能系统](https://hermes-agent.nousresearch.com/docs/user-guide/features/skills) | 过程记忆、技能中心、创建技能 |
|
||||
| [记忆](https://hermes-agent.nousresearch.com/docs/user-guide/features/memory) | 持久记忆、用户画像、最佳实践 |
|
||||
| [MCP 集成](https://hermes-agent.nousresearch.com/docs/user-guide/features/mcp) | 连接任意 MCP 服务器扩展能力 |
|
||||
| [定时调度](https://hermes-agent.nousresearch.com/docs/user-guide/features/cron) | 定时任务与平台投递 |
|
||||
| [上下文文件](https://hermes-agent.nousresearch.com/docs/user-guide/features/context-files) | 影响每次对话的项目上下文 |
|
||||
| [架构](https://hermes-agent.nousresearch.com/docs/developer-guide/architecture) | 项目结构、代理循环、关键类 |
|
||||
| [贡献](https://hermes-agent.nousresearch.com/docs/developer-guide/contributing) | 开发设置、PR 流程、代码风格 |
|
||||
| [CLI 参考](https://hermes-agent.nousresearch.com/docs/reference/cli-commands) | 所有命令和标志 |
|
||||
| [环境变量](https://hermes-agent.nousresearch.com/docs/reference/environment-variables) | 完整环境变量参考 |
|
||||
|
||||
---
|
||||
|
||||
## 从 OpenClaw 迁移
|
||||
|
||||
如果你来自 OpenClaw,Hermes 可以自动导入你的设置、记忆、技能和 API 密钥。
|
||||
|
||||
**首次安装时:** 安装向导(`hermes setup`)会自动检测 `~/.openclaw` 并在配置开始前提供迁移选项。
|
||||
|
||||
**安装后任意时间:**
|
||||
|
||||
```bash
|
||||
hermes claw migrate # 交互式迁移(完整预设)
|
||||
hermes claw migrate --dry-run # 预览将要迁移的内容
|
||||
hermes claw migrate --preset user-data # 仅迁移用户数据,不含密钥
|
||||
hermes claw migrate --overwrite # 覆盖已有冲突
|
||||
```
|
||||
|
||||
导入内容:
|
||||
- **SOUL.md** — 人格文件
|
||||
- **记忆** — MEMORY.md 和 USER.md 条目
|
||||
- **技能** — 用户创建的技能 → `~/.hermes/skills/openclaw-imports/`
|
||||
- **命令白名单** — 审批模式
|
||||
- **消息设置** — 平台配置、允许用户、工作目录
|
||||
- **API 密钥** — 白名单中的密钥(Telegram、OpenRouter、OpenAI、Anthropic、ElevenLabs)
|
||||
- **TTS 资产** — 工作区音频文件
|
||||
- **工作区指令** — AGENTS.md(使用 `--workspace-target`)
|
||||
|
||||
使用 `hermes claw migrate --help` 查看所有选项,或使用 `openclaw-migration` 技能进行交互式代理引导迁移(含干运行预览)。
|
||||
|
||||
---
|
||||
|
||||
## 贡献
|
||||
|
||||
欢迎贡献!请参阅 [贡献指南](https://hermes-agent.nousresearch.com/docs/developer-guide/contributing) 了解开发设置、代码风格和 PR 流程。
|
||||
|
||||
贡献者快速开始——克隆并使用 `setup-hermes.sh`:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/NousResearch/hermes-agent.git
|
||||
cd hermes-agent
|
||||
./setup-hermes.sh # 安装 uv、创建 venv、安装 .[all]、创建符号链接 ~/.local/bin/hermes
|
||||
./hermes # 自动检测 venv,无需先 source
|
||||
```
|
||||
|
||||
手动安装(等效于上述命令):
|
||||
|
||||
```bash
|
||||
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
uv venv venv --python 3.11
|
||||
source venv/bin/activate
|
||||
uv pip install -e ".[all,dev]"
|
||||
python -m pytest tests/ -q
|
||||
```
|
||||
|
||||
> **RL 训练(可选):** 如需参与 RL/Tinker-Atropos 集成开发:
|
||||
> ```bash
|
||||
> git submodule update --init tinker-atropos
|
||||
> uv pip install -e "./tinker-atropos"
|
||||
> ```
|
||||
|
||||
---
|
||||
|
||||
## 社区
|
||||
|
||||
- 💬 [Discord](https://discord.gg/NousResearch)
|
||||
- 📚 [技能中心](https://agentskills.io)
|
||||
- 🐛 [问题反馈](https://github.com/NousResearch/hermes-agent/issues)
|
||||
- 💡 [讨论区](https://github.com/NousResearch/hermes-agent/discussions)
|
||||
- 🔌 [HermesClaw](https://github.com/AaronWong1999/hermesclaw) — 社区微信桥接:在同一微信账号上运行 Hermes Agent 和 OpenClaw。
|
||||
|
||||
---
|
||||
|
||||
## 许可证
|
||||
|
||||
MIT — 详见 [LICENSE](LICENSE)。
|
||||
|
||||
由 [Nous Research](https://nousresearch.com) 构建。
|
||||
@@ -1,27 +0,0 @@
|
||||
# Hermes Agent v0.10.0 (v2026.4.16)
|
||||
|
||||
**Release Date:** April 16, 2026
|
||||
|
||||
> The Tool Gateway release — paid Nous Portal subscribers can now use web search, image generation, text-to-speech, and browser automation through their existing subscription with zero additional API keys.
|
||||
|
||||
---
|
||||
|
||||
## ✨ Highlights
|
||||
|
||||
- **Nous Tool Gateway** — Paid [Nous Portal](https://portal.nousresearch.com) subscribers now get automatic access to **web search** (Firecrawl), **image generation** (FAL / FLUX 2 Pro), **text-to-speech** (OpenAI TTS), and **browser automation** (Browser Use) through their existing subscription. No separate API keys needed — just run `hermes model`, select Nous Portal, and pick which tools to enable. Per-tool opt-in via `use_gateway` config, full integration with `hermes tools` and `hermes status`, and the runtime correctly prefers the gateway even when direct API keys exist. Replaces the old hidden `HERMES_ENABLE_NOUS_MANAGED_TOOLS` env var with clean subscription-based detection. ([#11206](https://github.com/NousResearch/hermes-agent/pull/11206), based on work by @jquesnelle; docs: [#11208](https://github.com/NousResearch/hermes-agent/pull/11208))
|
||||
|
||||
---
|
||||
|
||||
## 🐛 Bug Fixes & Improvements
|
||||
|
||||
This release includes 180+ commits with numerous bug fixes, platform improvements, and reliability enhancements across the agent core, gateway, CLI, and tool system. Full details will be published in the v0.11.0 changelog.
|
||||
|
||||
---
|
||||
|
||||
## 👥 Contributors
|
||||
|
||||
- **@jquesnelle** (emozilla) — Original Tool Gateway implementation ([#10799](https://github.com/NousResearch/hermes-agent/pull/10799)), salvaged and shipped in this release
|
||||
|
||||
---
|
||||
|
||||
**Full Changelog**: [v2026.4.13...v2026.4.16](https://github.com/NousResearch/hermes-agent/compare/v2026.4.13...v2026.4.16)
|
||||
@@ -1,453 +0,0 @@
|
||||
# Hermes Agent v0.11.0 (v2026.4.23)
|
||||
|
||||
**Release Date:** April 23, 2026
|
||||
**Since v0.9.0:** 1,556 commits · 761 merged PRs · 1,314 files changed · 224,174 insertions · 29 community contributors (290 including co-authors)
|
||||
|
||||
> The Interface release — a full React/Ink rewrite of the interactive CLI, a pluggable transport architecture underneath every provider, native AWS Bedrock support, five new inference paths, a 17th messaging platform (QQBot), a dramatically expanded plugin surface, and GPT-5.5 via Codex OAuth.
|
||||
|
||||
This release also folds in all the highlights deferred from v0.10.0 (which shipped only the Nous Tool Gateway) — so it covers roughly two weeks of work across the whole stack.
|
||||
|
||||
---
|
||||
|
||||
## ✨ Highlights
|
||||
|
||||
- **New Ink-based TUI** — `hermes --tui` is now a full React/Ink rewrite of the interactive CLI, with a Python JSON-RPC backend (`tui_gateway`). Sticky composer, live streaming with OSC-52 clipboard support, stable picker keys, status bar with per-turn stopwatch and git branch, `/clear` confirm, light-theme preset, and a subagent spawn observability overlay. ~310 commits to `ui-tui/` + `tui_gateway/`. (@OutThisLife + Teknium)
|
||||
|
||||
- **Transport ABC + Native AWS Bedrock** — Format conversion and HTTP transport were extracted from `run_agent.py` into a pluggable `agent/transports/` layer. `AnthropicTransport`, `ChatCompletionsTransport`, `ResponsesApiTransport`, and `BedrockTransport` each own their own format conversion and API shape. Native AWS Bedrock support via the Converse API ships on top of the new abstraction. ([#10549](https://github.com/NousResearch/hermes-agent/pull/10549), [#13347](https://github.com/NousResearch/hermes-agent/pull/13347), [#13366](https://github.com/NousResearch/hermes-agent/pull/13366), [#13430](https://github.com/NousResearch/hermes-agent/pull/13430), [#13805](https://github.com/NousResearch/hermes-agent/pull/13805), [#13814](https://github.com/NousResearch/hermes-agent/pull/13814) — @kshitijk4poor + Teknium)
|
||||
|
||||
- **Five new inference paths** — Native NVIDIA NIM ([#11774](https://github.com/NousResearch/hermes-agent/pull/11774)), Arcee AI ([#9276](https://github.com/NousResearch/hermes-agent/pull/9276)), Step Plan ([#13893](https://github.com/NousResearch/hermes-agent/pull/13893)), Google Gemini CLI OAuth ([#11270](https://github.com/NousResearch/hermes-agent/pull/11270)), and Vercel ai-gateway with pricing + dynamic discovery ([#13223](https://github.com/NousResearch/hermes-agent/pull/13223) — @jerilynzheng). Plus Gemini routed through the native AI Studio API for better performance ([#12674](https://github.com/NousResearch/hermes-agent/pull/12674)).
|
||||
|
||||
- **GPT-5.5 over Codex OAuth** — OpenAI's new GPT-5.5 reasoning model is now available through your ChatGPT Codex OAuth, with live model discovery wired into the model picker so new OpenAI releases show up without catalog updates. ([#14720](https://github.com/NousResearch/hermes-agent/pull/14720))
|
||||
|
||||
- **QQBot — 17th supported platform** — Native QQBot adapter via QQ Official API v2, with QR scan-to-configure setup wizard, streaming cursor, emoji reactions, and DM/group policy gating at parity with the WeCom/Weixin adapters. ([#9364](https://github.com/NousResearch/hermes-agent/pull/9364), [#11831](https://github.com/NousResearch/hermes-agent/pull/11831))
|
||||
|
||||
- **Plugin surface expanded** — Plugins can now register slash commands (`register_command`), dispatch tools directly (`dispatch_tool`), block tool execution from hooks (`pre_tool_call` can veto), rewrite tool results (`transform_tool_result`), transform terminal output (`transform_terminal_output`), ship image_gen backends, and add custom dashboard tabs. The bundled disk-cleanup plugin is opt-in by default as a reference implementation. ([#9377](https://github.com/NousResearch/hermes-agent/pull/9377), [#10626](https://github.com/NousResearch/hermes-agent/pull/10626), [#10763](https://github.com/NousResearch/hermes-agent/pull/10763), [#10951](https://github.com/NousResearch/hermes-agent/pull/10951), [#12929](https://github.com/NousResearch/hermes-agent/pull/12929), [#12944](https://github.com/NousResearch/hermes-agent/pull/12944), [#12972](https://github.com/NousResearch/hermes-agent/pull/12972), [#13799](https://github.com/NousResearch/hermes-agent/pull/13799), [#14175](https://github.com/NousResearch/hermes-agent/pull/14175))
|
||||
|
||||
- **`/steer` — mid-run agent nudges** — `/steer <prompt>` injects a note that the running agent sees after its next tool call, without interrupting the turn or breaking prompt cache. For when you want to course-correct an agent in-flight (see the sketch after this list). ([#12116](https://github.com/NousResearch/hermes-agent/pull/12116))
|
||||
|
||||
- **Shell hooks** — Wire any shell script as a Hermes lifecycle hook (pre_tool_call, post_tool_call, on_session_start, etc.) without writing a Python plugin. ([#13296](https://github.com/NousResearch/hermes-agent/pull/13296))
|
||||
|
||||
- **Webhook direct-delivery mode** — Webhook subscriptions can now forward payloads straight to a platform chat without going through the agent — zero-LLM push notifications for alerting, uptime checks, and event streams. ([#12473](https://github.com/NousResearch/hermes-agent/pull/12473))
|
||||
|
||||
- **Smarter delegation** — Subagents now have an explicit `orchestrator` role that can spawn their own workers, with configurable `max_spawn_depth` (default flat). Concurrent sibling subagents share filesystem state through a file-coordination layer so they don't clobber each other's edits. ([#13691](https://github.com/NousResearch/hermes-agent/pull/13691), [#13718](https://github.com/NousResearch/hermes-agent/pull/13718))
|
||||
|
||||
- **Auxiliary models — configurable UI + main-model-first** — `hermes model` has a dedicated "Configure auxiliary models" screen for per-task overrides (compression, vision, session_search, title_generation). `auto` routing now defaults to the main model for side tasks across all users (previously aggregator users were silently routed to a cheap provider-side default). ([#11891](https://github.com/NousResearch/hermes-agent/pull/11891), [#11900](https://github.com/NousResearch/hermes-agent/pull/11900))
|
||||
|
||||
- **Dashboard plugin system + live theme switching** — The web dashboard is now extensible. Third-party plugins can add custom tabs, widgets, and views without forking. Paired with a live-switching theme system — themes now control colors, fonts, layout, and density — so users can hot-swap the dashboard look without a reload. Same theming discipline the CLI has, now on the web. ([#10951](https://github.com/NousResearch/hermes-agent/pull/10951), [#10687](https://github.com/NousResearch/hermes-agent/pull/10687), [#14725](https://github.com/NousResearch/hermes-agent/pull/14725))
|
||||
|
||||
- **Dashboard polish** — i18n (English + Chinese), react-router sidebar layout, mobile-responsive, Vercel deployment, real per-session API call tracking, and one-click update + gateway restart buttons. ([#9228](https://github.com/NousResearch/hermes-agent/pull/9228), [#9370](https://github.com/NousResearch/hermes-agent/pull/9370), [#9453](https://github.com/NousResearch/hermes-agent/pull/9453), [#10686](https://github.com/NousResearch/hermes-agent/pull/10686), [#13526](https://github.com/NousResearch/hermes-agent/pull/13526), [#14004](https://github.com/NousResearch/hermes-agent/pull/14004) — @austinpickett + @DeployFaith + Teknium)
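
To make the TUI and `/steer` items above concrete, a minimal session sketch (the prompt text is a placeholder):

```bash
hermes --tui     # launch the new Ink-based interactive CLI (or set HERMES_TUI=1)
# While the agent is mid-turn, nudge it without interrupting the run:
#   /steer focus on the failing unit test before touching the docs
# The note is delivered after the agent's next tool call, so the prompt cache stays warm.
```
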
|
||||
|
||||
---
|
||||
|
||||
## 🏗️ Core Agent & Architecture
|
||||
|
||||
### Transport Layer (NEW)
|
||||
- **Transport ABC** abstracts format conversion and HTTP transport from `run_agent.py` into `agent/transports/` ([#13347](https://github.com/NousResearch/hermes-agent/pull/13347))
|
||||
- **AnthropicTransport** — Anthropic Messages API path ([#13366](https://github.com/NousResearch/hermes-agent/pull/13366), @kshitijk4poor)
|
||||
- **ChatCompletionsTransport** — default path for OpenAI-compatible providers ([#13805](https://github.com/NousResearch/hermes-agent/pull/13805))
|
||||
- **ResponsesApiTransport** — OpenAI Responses API + Codex build_kwargs wiring ([#13430](https://github.com/NousResearch/hermes-agent/pull/13430), @kshitijk4poor)
|
||||
- **BedrockTransport** — AWS Bedrock Converse API transport ([#13814](https://github.com/NousResearch/hermes-agent/pull/13814))
|
||||
|
||||
### Provider & Model Support
|
||||
- **Native AWS Bedrock provider** via Converse API ([#10549](https://github.com/NousResearch/hermes-agent/pull/10549))
|
||||
- **NVIDIA NIM native provider** (salvage of #11703) ([#11774](https://github.com/NousResearch/hermes-agent/pull/11774))
|
||||
- **Arcee AI direct provider** ([#9276](https://github.com/NousResearch/hermes-agent/pull/9276))
|
||||
- **Step Plan provider** (salvage #6005) ([#13893](https://github.com/NousResearch/hermes-agent/pull/13893), @kshitijk4poor)
|
||||
- **Google Gemini CLI OAuth** inference provider ([#11270](https://github.com/NousResearch/hermes-agent/pull/11270))
|
||||
- **Vercel ai-gateway** with pricing, attribution, and dynamic discovery ([#13223](https://github.com/NousResearch/hermes-agent/pull/13223), @jerilynzheng)
|
||||
- **GPT-5.5 over Codex OAuth** with live model discovery in the picker ([#14720](https://github.com/NousResearch/hermes-agent/pull/14720))
|
||||
- **Gemini routed through native AI Studio API** ([#12674](https://github.com/NousResearch/hermes-agent/pull/12674))
|
||||
- **xAI Grok upgraded to Responses API** ([#10783](https://github.com/NousResearch/hermes-agent/pull/10783))
|
||||
- **Ollama improvements** — Cloud provider support, GLM continuation, `think=false` control, surrogate sanitization, `/v1` hint ([#10782](https://github.com/NousResearch/hermes-agent/pull/10782))
|
||||
- **Kimi K2.6** across OpenRouter, Nous Portal, native Kimi, and HuggingFace ([#13148](https://github.com/NousResearch/hermes-agent/pull/13148), [#13152](https://github.com/NousResearch/hermes-agent/pull/13152), [#13169](https://github.com/NousResearch/hermes-agent/pull/13169))
|
||||
- **Kimi K2.5** promoted to first position in all model suggestion lists ([#11745](https://github.com/NousResearch/hermes-agent/pull/11745), @kshitijk4poor)
|
||||
- **Xiaomi MiMo v2.5-pro + v2.5** on OpenRouter, Nous Portal, and native ([#14184](https://github.com/NousResearch/hermes-agent/pull/14184), [#14635](https://github.com/NousResearch/hermes-agent/pull/14635), @kshitijk4poor)
|
||||
- **GLM-5V-Turbo** for coding plan ([#9907](https://github.com/NousResearch/hermes-agent/pull/9907))
|
||||
- **Claude Opus 4.7** in Nous Portal catalog ([#11398](https://github.com/NousResearch/hermes-agent/pull/11398))
|
||||
- **OpenRouter elephant-alpha** in curated lists ([#9378](https://github.com/NousResearch/hermes-agent/pull/9378))
|
||||
- **OpenCode-Go** — Kimi K2.6 and Qwen3.5/3.6 Plus in curated catalog ([#13429](https://github.com/NousResearch/hermes-agent/pull/13429))
|
||||
- **minimax/minimax-m2.5:free** in OpenRouter catalog ([#13836](https://github.com/NousResearch/hermes-agent/pull/13836))
|
||||
- **`/model` merges models.dev entries** for lesser-loved providers ([#14221](https://github.com/NousResearch/hermes-agent/pull/14221))
|
||||
- **Per-provider + per-model `request_timeout_seconds`** config ([#12652](https://github.com/NousResearch/hermes-agent/pull/12652))
|
||||
- **Configurable API retry count** via `agent.api_max_retries` ([#14730](https://github.com/NousResearch/hermes-agent/pull/14730))
|
||||
- **ctx_size context length key** for Lemonade server (salvage #8536) ([#14215](https://github.com/NousResearch/hermes-agent/pull/14215))
|
||||
- **Custom provider display name prompt** ([#9420](https://github.com/NousResearch/hermes-agent/pull/9420))
|
||||
- **Recommendation badges** on tool provider selection ([#9929](https://github.com/NousResearch/hermes-agent/pull/9929))
|
||||
- Fix: correct GPT-5 family context lengths in fallback defaults ([#9309](https://github.com/NousResearch/hermes-agent/pull/9309))
|
||||
- Fix: clamp `minimal` reasoning effort to `low` on Responses API ([#9429](https://github.com/NousResearch/hermes-agent/pull/9429))
|
||||
- Fix: strip reasoning item IDs from Responses API input when `store=False` ([#10217](https://github.com/NousResearch/hermes-agent/pull/10217))
|
||||
- Fix: OpenViking correct account default + commit session on `/new` and compress ([#10463](https://github.com/NousResearch/hermes-agent/pull/10463))
|
||||
- Fix: Kimi `/coding` thinking block survival + empty reasoning_content + block ordering (multiple PRs)
|
||||
- Fix: don't send Anthropic thinking to api.kimi.com/coding ([#13826](https://github.com/NousResearch/hermes-agent/pull/13826))
|
||||
- Fix: send `max_tokens`, `reasoning_effort`, and `thinking` for Kimi/Moonshot
|
||||
- Fix: stream reasoning content through OpenAI-compatible providers that emit it
|
||||
|
||||
### Agent Loop & Conversation
|
||||
- **`/steer <prompt>`** — mid-run agent nudges after next tool call ([#12116](https://github.com/NousResearch/hermes-agent/pull/12116))
|
||||
- **Orchestrator role + configurable spawn depth** for `delegate_task` (default flat) ([#13691](https://github.com/NousResearch/hermes-agent/pull/13691))
|
||||
- **Cross-agent file state coordination** for concurrent subagents ([#13718](https://github.com/NousResearch/hermes-agent/pull/13718))
|
||||
- **Compressor smart collapse, dedup, anti-thrashing**, template upgrade, hardening ([#10088](https://github.com/NousResearch/hermes-agent/pull/10088))
|
||||
- **Compression summaries respect the conversation's language** ([#12556](https://github.com/NousResearch/hermes-agent/pull/12556))
|
||||
- **Compression model falls back to main model** on permanent 503/404 ([#10093](https://github.com/NousResearch/hermes-agent/pull/10093))
|
||||
- **Auto-continue interrupted agent work** after gateway restart ([#9934](https://github.com/NousResearch/hermes-agent/pull/9934))
|
||||
- **Activity heartbeats** prevent false gateway inactivity timeouts ([#10501](https://github.com/NousResearch/hermes-agent/pull/10501))
|
||||
- **Auxiliary models UI** — dedicated screen for per-task overrides ([#11891](https://github.com/NousResearch/hermes-agent/pull/11891))
|
||||
- **Auxiliary auto routing defaults to main model** for all users ([#11900](https://github.com/NousResearch/hermes-agent/pull/11900))
|
||||
- **PLATFORM_HINTS for Matrix, Mattermost, Feishu** ([#14428](https://github.com/NousResearch/hermes-agent/pull/14428), @alt-glitch)
|
||||
- Fix: reset retry counters after compression; stop poisoning conversation history ([#10055](https://github.com/NousResearch/hermes-agent/pull/10055))
|
||||
- Fix: break compression-exhaustion infinite loop and auto-reset session ([#10063](https://github.com/NousResearch/hermes-agent/pull/10063))
|
||||
- Fix: stale agent timeout, uv venv detection, empty response after tools ([#10065](https://github.com/NousResearch/hermes-agent/pull/10065))
|
||||
- Fix: prevent premature loop exit when weak models return empty after substantive tool calls ([#10472](https://github.com/NousResearch/hermes-agent/pull/10472))
|
||||
- Fix: preserve pre-start terminal interrupts ([#10504](https://github.com/NousResearch/hermes-agent/pull/10504))
|
||||
- Fix: improve interrupt responsiveness during concurrent tool execution ([#10935](https://github.com/NousResearch/hermes-agent/pull/10935))
|
||||
- Fix: word-wrap spinner, interruptable agent join, and delegate_task interrupt ([#10940](https://github.com/NousResearch/hermes-agent/pull/10940))
|
||||
- Fix: `/stop` no longer resets the session ([#9224](https://github.com/NousResearch/hermes-agent/pull/9224))
|
||||
- Fix: honor interrupts during MCP tool waits ([#9382](https://github.com/NousResearch/hermes-agent/pull/9382), @helix4u)
|
||||
- Fix: break stuck session resume loops after repeated restarts ([#9941](https://github.com/NousResearch/hermes-agent/pull/9941))
|
||||
- Fix: empty response nudge crash + placeholder leak to cron targets ([#11021](https://github.com/NousResearch/hermes-agent/pull/11021))
|
||||
- Fix: streaming cursor sanitization to prevent message truncation (multiple PRs)
|
||||
- Fix: resolve `context_length` for plugin context engines ([#9238](https://github.com/NousResearch/hermes-agent/pull/9238))
|
||||
|
||||
### Session & Memory
|
||||
- **Auto-prune old sessions + VACUUM state.db** at startup ([#13861](https://github.com/NousResearch/hermes-agent/pull/13861))
|
||||
- **Honcho overhaul** — context injection, 5-tool surface, cost safety, session isolation ([#10619](https://github.com/NousResearch/hermes-agent/pull/10619))
|
||||
- **Hindsight richer session-scoped retain metadata** (salvage of #6290) ([#13987](https://github.com/NousResearch/hermes-agent/pull/13987))
|
||||
- Fix: deduplicate memory provider tools to prevent 400 on strict providers ([#10511](https://github.com/NousResearch/hermes-agent/pull/10511))
|
||||
- Fix: discover user-installed memory providers from `$HERMES_HOME/plugins/` ([#10529](https://github.com/NousResearch/hermes-agent/pull/10529))
|
||||
- Fix: add `on_memory_write` bridge to sequential tool execution path ([#10507](https://github.com/NousResearch/hermes-agent/pull/10507))
|
||||
- Fix: preserve `session_id` across `previous_response_id` chains in `/v1/responses` ([#10059](https://github.com/NousResearch/hermes-agent/pull/10059))
|
||||
|
||||
---
|
||||
|
||||
## 🖥️ New Ink-based TUI
|
||||
|
||||
A full React/Ink rewrite of the interactive CLI — invoked via `hermes --tui` or `HERMES_TUI=1`. Shipped across ~310 commits to `ui-tui/` and `tui_gateway/`.
|
||||
|
||||
### TUI Foundations
|
||||
- New TUI based on Ink + Python JSON-RPC backend
|
||||
- Prettier + ESLint + vitest tooling for `ui-tui/`
|
||||
- Entry split between `src/entry.tsx` (TTY gate) and `src/app.tsx` (state machine)
|
||||
- Persistent `_SlashWorker` subprocess for slash command dispatch
|
||||
|
||||
### UX & Features
|
||||
- **Stable picker keys, /clear confirm, light-theme preset** ([#12312](https://github.com/NousResearch/hermes-agent/pull/12312), @OutThisLife)
|
||||
- **Git branch in status bar** cwd label ([#12305](https://github.com/NousResearch/hermes-agent/pull/12305), @OutThisLife)
|
||||
- **Per-turn elapsed stopwatch in FaceTicker + done-in sys line** ([#13105](https://github.com/NousResearch/hermes-agent/pull/13105), @OutThisLife)
|
||||
- **Subagent spawn observability overlay** ([#14045](https://github.com/NousResearch/hermes-agent/pull/14045), @OutThisLife)
|
||||
- **Per-prompt elapsed stopwatch in status bar** ([#12948](https://github.com/NousResearch/hermes-agent/pull/12948))
|
||||
- Sticky composer that freezes during scroll
|
||||
- OSC-52 clipboard support for copy across SSH sessions
|
||||
- Virtualized history rendering for performance
|
||||
- Slash command autocomplete via `complete.slash` RPC
|
||||
- Path autocomplete via `complete.path` RPC
|
||||
- Dozens of resize/ghosting/sticky-prompt fixes landed through the week
|
||||
|
||||
### Structural Refactors
|
||||
- Decomposed `app.tsx` into `app/event-handler`, `app/slash-handler`, `app/stores`, `app/hooks` ([#14640](https://github.com/NousResearch/hermes-agent/pull/14640) and surrounding)
|
||||
- Component split: `branding.tsx`, `markdown.tsx`, `prompts.tsx`, `sessionPicker.tsx`, `messageLine.tsx`, `thinking.tsx`, `maskedPrompt.tsx`
|
||||
- Hook split: `useCompletion`, `useInputHistory`, `useQueue`, `useVirtualHistory`
|
||||
|
||||
---
|
||||
|
||||
## 📱 Messaging Platforms (Gateway)
|
||||
|
||||
### New Platforms
|
||||
- **QQBot (17th platform)** — QQ Official API v2 adapter with QR setup, streaming, package split ([#9364](https://github.com/NousResearch/hermes-agent/pull/9364), [#11831](https://github.com/NousResearch/hermes-agent/pull/11831))
|
||||
|
||||
### Telegram
|
||||
- **Dedicated `TELEGRAM_PROXY` env var + config.yaml proxy support** (closes #9414, #6530, #9074, #7786) ([#10681](https://github.com/NousResearch/hermes-agent/pull/10681)) — example after this list
|
||||
- **`ignored_threads` config** for Telegram groups ([#9530](https://github.com/NousResearch/hermes-agent/pull/9530))
|
||||
- **Config option to disable link previews** (closes #8728) ([#10610](https://github.com/NousResearch/hermes-agent/pull/10610))
|
||||
- **Auto-wrap markdown tables** in code blocks ([#11794](https://github.com/NousResearch/hermes-agent/pull/11794))
|
||||
- Fix: prevent duplicate replies when stream task is cancelled ([#9319](https://github.com/NousResearch/hermes-agent/pull/9319))
|
||||
- Fix: prevent streaming cursor (▉) from appearing as standalone messages ([#9538](https://github.com/NousResearch/hermes-agent/pull/9538))
|
||||
- Fix: retry transient tool sends + cold-boot budget ([#10947](https://github.com/NousResearch/hermes-agent/pull/10947))
|
||||
- Fix: Markdown special char escaping in `send_exec_approval`
|
||||
- Fix: parentheses in URLs during MarkdownV2 link conversion
|
||||
- Fix: Unicode dash normalization in model switch (closes iOS smart-punctuation issue)
|
||||
- Many platform hint / streaming / session-key fixes
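
For the `TELEGRAM_PROXY` support at the top of this list, a minimal sketch — the env var name comes from the notes; the SOCKS5 URL format is an assumption:

```bash
export TELEGRAM_PROXY="socks5://127.0.0.1:1080"   # proxy URL format is an assumption
hermes gateway restart                            # restart the gateway so the Telegram adapter picks it up
```
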
|
||||
|
||||
### Discord
|
||||
- **Forum channel support** (salvage of #10145 + media + polish) ([#11920](https://github.com/NousResearch/hermes-agent/pull/11920))
|
||||
- **`DISCORD_ALLOWED_ROLES`** for role-based access control ([#11608](https://github.com/NousResearch/hermes-agent/pull/11608))
|
||||
- **Config option to disable slash commands** (salvage #13130) ([#14315](https://github.com/NousResearch/hermes-agent/pull/14315))
|
||||
- **Native `send_animation`** for inline GIF playback ([#10283](https://github.com/NousResearch/hermes-agent/pull/10283))
|
||||
- **`send_message` Discord media attachments** ([#10246](https://github.com/NousResearch/hermes-agent/pull/10246))
|
||||
- **`/skill` command group** with category subcommands ([#9909](https://github.com/NousResearch/hermes-agent/pull/9909))
|
||||
- **Extract reply text from message references** ([#9781](https://github.com/NousResearch/hermes-agent/pull/9781))
|
||||
|
||||
### Feishu
|
||||
- **Intelligent reply on document comments** with 3-tier access control ([#11898](https://github.com/NousResearch/hermes-agent/pull/11898))
|
||||
- **Show processing state via reactions** on user messages ([#12927](https://github.com/NousResearch/hermes-agent/pull/12927))
|
||||
- **Preserve @mention context for agent consumption** (salvage #13874) ([#14167](https://github.com/NousResearch/hermes-agent/pull/14167))
|
||||
|
||||
### DingTalk
|
||||
- **`require_mention` + `allowed_users` gating** (parity with Slack/Telegram/Discord) ([#11564](https://github.com/NousResearch/hermes-agent/pull/11564))
|
||||
- **QR-code device-flow authorization** for setup wizard ([#11574](https://github.com/NousResearch/hermes-agent/pull/11574))
|
||||
- **AI Cards streaming, emoji reactions, and media handling** (salvage of #10985) ([#11910](https://github.com/NousResearch/hermes-agent/pull/11910))
|
||||
|
||||
### WhatsApp
|
||||
- **`send_voice`** — native audio message delivery ([#13002](https://github.com/NousResearch/hermes-agent/pull/13002))
|
||||
- **`dm_policy` and `group_policy`** parity with WeCom/Weixin/QQ adapters ([#13151](https://github.com/NousResearch/hermes-agent/pull/13151))
|
||||
|
||||
### WeCom / Weixin
|
||||
- **WeCom QR-scan bot creation + interactive setup wizard** (salvage #13923) ([#13961](https://github.com/NousResearch/hermes-agent/pull/13961))
|
||||
|
||||
### Signal
|
||||
- **Media delivery support** via `send_message` ([#13178](https://github.com/NousResearch/hermes-agent/pull/13178))
|
||||
|
||||
### Slack
|
||||
- **Per-thread sessions for DMs by default** ([#10987](https://github.com/NousResearch/hermes-agent/pull/10987))
|
||||
|
||||
### BlueBubbles (iMessage)
|
||||
- Group chat session separation, webhook registration & auth fixes ([#9806](https://github.com/NousResearch/hermes-agent/pull/9806))
|
||||
|
||||
### Gateway Core
|
||||
- **Gateway proxy mode** — forward messages to a remote API server ([#9787](https://github.com/NousResearch/hermes-agent/pull/9787))
|
||||
- **Per-channel ephemeral prompts** (Discord, Telegram, Slack, Mattermost) ([#10564](https://github.com/NousResearch/hermes-agent/pull/10564))
|
||||
- **Surface plugin slash commands** natively on all platforms + decision-capable command hook ([#14175](https://github.com/NousResearch/hermes-agent/pull/14175))
|
||||
- **Support document/archive extensions in MEDIA: tag extraction** (salvage #8255) ([#14307](https://github.com/NousResearch/hermes-agent/pull/14307))
|
||||
- **Recognize `.pdf` in MEDIA: tag extraction** ([#13683](https://github.com/NousResearch/hermes-agent/pull/13683))
|
||||
- **`--all` flag for `gateway start` and `restart`** ([#10043](https://github.com/NousResearch/hermes-agent/pull/10043))
|
||||
- **Notify active sessions on gateway shutdown** + update health check ([#9850](https://github.com/NousResearch/hermes-agent/pull/9850))
|
||||
- **Block agent from self-destructing the gateway** via terminal (closes #6666) ([#9895](https://github.com/NousResearch/hermes-agent/pull/9895))
|
||||
- Fix: suppress duplicate replies on interrupt and streaming flood control ([#10235](https://github.com/NousResearch/hermes-agent/pull/10235))
|
||||
- Fix: close temporary agents after one-off tasks ([#11028](https://github.com/NousResearch/hermes-agent/pull/11028), @kshitijk4poor)
|
||||
- Fix: busy-session ack when user messages during active agent run ([#10068](https://github.com/NousResearch/hermes-agent/pull/10068))
|
||||
- Fix: route watch-pattern notifications to the originating session ([#10460](https://github.com/NousResearch/hermes-agent/pull/10460))
|
||||
- Fix: preserve notify context in executor threads ([#10921](https://github.com/NousResearch/hermes-agent/pull/10921), @kshitijk4poor)
|
||||
- Fix: avoid duplicate replies after interrupted long tasks ([#11018](https://github.com/NousResearch/hermes-agent/pull/11018))
|
||||
- Fix: unlink stale PID + lock files on cleanup
|
||||
- Fix: force-unlink stale PID file after `--replace` takeover
|
||||
|
||||
---
|
||||
|
||||
## 🔧 Tool System
|
||||
|
||||
### Plugin Surface (major expansion)
|
||||
- **`register_command()`** — plugins can now add slash commands ([#10626](https://github.com/NousResearch/hermes-agent/pull/10626))
|
||||
- **`dispatch_tool()`** — plugins can invoke tools from their code ([#10763](https://github.com/NousResearch/hermes-agent/pull/10763))
|
||||
- **`pre_tool_call` blocking** — plugins can veto tool execution ([#9377](https://github.com/NousResearch/hermes-agent/pull/9377))
|
||||
- **`transform_tool_result`** — plugins rewrite tool results generically ([#12972](https://github.com/NousResearch/hermes-agent/pull/12972))
|
||||
- **`transform_terminal_output`** — plugins rewrite terminal tool output ([#12929](https://github.com/NousResearch/hermes-agent/pull/12929))
|
||||
- **Namespaced skill registration** for plugin skill bundles ([#9786](https://github.com/NousResearch/hermes-agent/pull/9786))
|
||||
- **Opt-in-by-default + bundled disk-cleanup plugin** (salvage #12212) ([#12944](https://github.com/NousResearch/hermes-agent/pull/12944))
|
||||
- **Pluggable `image_gen` backends + OpenAI provider** ([#13799](https://github.com/NousResearch/hermes-agent/pull/13799))
|
||||
- **`openai-codex` image_gen plugin** (gpt-image-2 via Codex OAuth) ([#14317](https://github.com/NousResearch/hermes-agent/pull/14317))
|
||||
- **Shell hooks** — wire shell scripts as hook callbacks ([#13296](https://github.com/NousResearch/hermes-agent/pull/13296))
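
As a sketch of what a shell hook can look like — the script below is hypothetical, and the payload/registration contract is defined by the shell hooks feature itself, not here:

```bash
#!/usr/bin/env bash
# Hypothetical pre_tool_call hook: append an audit line every time the agent invokes a tool.
# How Hermes hands event details to the script (env vars, stdin, or args) is an assumption —
# check the shell hooks docs for the actual contract.
mkdir -p "$HOME/.hermes/logs"
printf '%s pre_tool_call\n' "$(date -Is)" >> "$HOME/.hermes/logs/hook-audit.log"
```
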
|
||||
|
||||
### Browser
|
||||
- **`browser_cdp` raw DevTools Protocol passthrough** ([#12369](https://github.com/NousResearch/hermes-agent/pull/12369))
|
||||
- Camofox hardening + connection stability across the window
|
||||
|
||||
### Execute Code
|
||||
- **Project/strict execution modes** (default: project) ([#11971](https://github.com/NousResearch/hermes-agent/pull/11971))
|
||||
|
||||
### Image Generation
|
||||
- **Multi-model FAL support** with picker in `hermes tools` ([#11265](https://github.com/NousResearch/hermes-agent/pull/11265))
|
||||
- **Recraft V3 → V4 Pro, Nano Banana → Pro upgrades** ([#11406](https://github.com/NousResearch/hermes-agent/pull/11406))
|
||||
- **GPT Image 2** in FAL catalog ([#13677](https://github.com/NousResearch/hermes-agent/pull/13677))
|
||||
- **xAI image generation provider** (grok-imagine-image) ([#14765](https://github.com/NousResearch/hermes-agent/pull/14765))
|
||||
|
||||
### TTS / STT / Voice
|
||||
- **Google Gemini TTS provider** ([#11229](https://github.com/NousResearch/hermes-agent/pull/11229))
|
||||
- **xAI Grok STT provider** ([#14473](https://github.com/NousResearch/hermes-agent/pull/14473))
|
||||
- **xAI TTS** (shipped with Responses API upgrade) ([#10783](https://github.com/NousResearch/hermes-agent/pull/10783))
|
||||
- **KittenTTS local provider** (salvage of #2109) ([#13395](https://github.com/NousResearch/hermes-agent/pull/13395))
|
||||
- **CLI record beep toggle** ([#13247](https://github.com/NousResearch/hermes-agent/pull/13247), @helix4u)
|
||||
|
||||
### Webhook / Cron
|
||||
- **Webhook direct-delivery mode** — zero-LLM push notifications ([#12473](https://github.com/NousResearch/hermes-agent/pull/12473))
|
||||
- **Cron `wakeAgent` gate** — scripts can skip the agent entirely ([#12373](https://github.com/NousResearch/hermes-agent/pull/12373))
|
||||
- **Cron per-job `enabled_toolsets`** — cap token overhead + cost per job ([#14767](https://github.com/NousResearch/hermes-agent/pull/14767))
|
||||
|
||||
### Delegate
|
||||
- **Orchestrator role** + configurable spawn depth (default flat) ([#13691](https://github.com/NousResearch/hermes-agent/pull/13691))
|
||||
- **Cross-agent file state coordination** ([#13718](https://github.com/NousResearch/hermes-agent/pull/13718))
|
||||
|
||||
### File / Patch
|
||||
- **`patch` — "did you mean?" feedback** when patch fails to match ([#13435](https://github.com/NousResearch/hermes-agent/pull/13435))
|
||||
|
||||
### API Server
|
||||
- **Stream `/v1/responses` SSE tool events** (salvage #9779) ([#10049](https://github.com/NousResearch/hermes-agent/pull/10049))
|
||||
- **Inline image inputs** on `/v1/chat/completions` and `/v1/responses` ([#12969](https://github.com/NousResearch/hermes-agent/pull/12969))
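
A hedged example of the inline image input against a locally running API server — the host/port, model name, and OpenAI-style `image_url` content part are assumptions about the request shape, not taken from the PR:

```bash
# Hypothetical request — adjust host, port, and model to your local API server setup.
curl -s http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
        "model": "hermes",
        "messages": [{
          "role": "user",
          "content": [
            {"type": "text", "text": "What is in this image?"},
            {"type": "image_url", "image_url": {"url": "data:image/png;base64,<BASE64_PNG>"}}
          ]
        }]
      }'
```
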
|
||||
|
||||
### Docker / Podman
|
||||
- **Entry-level Podman support** — `find_docker()` + rootless entrypoint ([#10066](https://github.com/NousResearch/hermes-agent/pull/10066))
|
||||
- **Add docker-cli to Docker image** (salvage #10096) ([#14232](https://github.com/NousResearch/hermes-agent/pull/14232))
|
||||
- **File-sync back to host on teardown** (salvage of #8189 + hardening) ([#11291](https://github.com/NousResearch/hermes-agent/pull/11291))
|
||||
|
||||
### MCP
|
||||
- 12 MCP improvements across the window (status, timeout handling, tool-call forwarding, etc.)
|
||||
|
||||
---
|
||||
|
||||
## 🧩 Skills Ecosystem
|
||||
|
||||
### Skill System
|
||||
- **Namespaced skill registration** for plugin bundles ([#9786](https://github.com/NousResearch/hermes-agent/pull/9786))
|
||||
- **`hermes skills reset`** to un-stick bundled skills ([#11468](https://github.com/NousResearch/hermes-agent/pull/11468))
|
||||
- **Skills guard opt-in** — `config.skills.guard_agent_created` (default off) ([#14557](https://github.com/NousResearch/hermes-agent/pull/14557))
|
||||
- **Bundled skill scripts runnable out of the box** ([#13384](https://github.com/NousResearch/hermes-agent/pull/13384))
|
||||
- **`xitter` replaced with `xurl`** — the official X API CLI ([#12303](https://github.com/NousResearch/hermes-agent/pull/12303))
|
||||
- **MiniMax-AI/cli as default skill tap** (salvage #7501) ([#14493](https://github.com/NousResearch/hermes-agent/pull/14493))
|
||||
- **Fuzzy `@` file completions + mtime sorting** ([#9467](https://github.com/NousResearch/hermes-agent/pull/9467))
|
||||
|
||||
### New Skills
|
||||
- **concept-diagrams** (salvage of #11045, @v1k22) ([#11363](https://github.com/NousResearch/hermes-agent/pull/11363))
|
||||
- **architecture-diagram** (Cocoon AI port) ([#9906](https://github.com/NousResearch/hermes-agent/pull/9906))
|
||||
- **pixel-art** with hardware palettes and video animation ([#12663](https://github.com/NousResearch/hermes-agent/pull/12663), [#12725](https://github.com/NousResearch/hermes-agent/pull/12725))
|
||||
- **baoyu-comic** ([#13257](https://github.com/NousResearch/hermes-agent/pull/13257), @JimLiu)
|
||||
- **baoyu-infographic** — 21 layouts × 21 styles (salvage #9901) ([#12254](https://github.com/NousResearch/hermes-agent/pull/12254))
|
||||
- **page-agent** — embed Alibaba's in-page GUI agent in your webapp ([#13976](https://github.com/NousResearch/hermes-agent/pull/13976))
|
||||
- **fitness-nutrition** optional skill + optional env var support ([#9355](https://github.com/NousResearch/hermes-agent/pull/9355))
|
||||
- **drug-discovery** — ChEMBL, PubChem, OpenFDA, ADMET ([#9443](https://github.com/NousResearch/hermes-agent/pull/9443))
|
||||
- **touchdesigner-mcp** (salvage of #10081) ([#12298](https://github.com/NousResearch/hermes-agent/pull/12298))
|
||||
- **adversarial-ux-test** optional skill (salvage of #2494, @omnissiah-comelse) ([#13425](https://github.com/NousResearch/hermes-agent/pull/13425))
|
||||
- **maps** — added `guest_house`, `camp_site`, and dual-key bakery lookup ([#13398](https://github.com/NousResearch/hermes-agent/pull/13398))
|
||||
- **llm-wiki** — port provenance markers, source hashing, and quality signals ([#13700](https://github.com/NousResearch/hermes-agent/pull/13700))
|
||||
|
||||
---
|
||||
|
||||
## 📊 Web Dashboard
|
||||
|
||||
- **i18n (English + Chinese) language switcher** ([#9453](https://github.com/NousResearch/hermes-agent/pull/9453))
|
||||
- **Live-switching theme system** ([#10687](https://github.com/NousResearch/hermes-agent/pull/10687))
|
||||
- **Dashboard plugin system** — extend the web UI with custom tabs ([#10951](https://github.com/NousResearch/hermes-agent/pull/10951))
|
||||
- **react-router, sidebar layout, sticky header, dropdown component** ([#9370](https://github.com/NousResearch/hermes-agent/pull/9370), @austinpickett)
|
||||
- **Responsive for mobile** ([#9228](https://github.com/NousResearch/hermes-agent/pull/9228), @DeployFaith)
|
||||
- **Vercel deployment** ([#10686](https://github.com/NousResearch/hermes-agent/pull/10686), [#11061](https://github.com/NousResearch/hermes-agent/pull/11061), @austinpickett)
|
||||
- **Context window config support** ([#9357](https://github.com/NousResearch/hermes-agent/pull/9357))
|
||||
- **HTTP health probe for cross-container gateway detection** ([#9894](https://github.com/NousResearch/hermes-agent/pull/9894))
|
||||
- **Update + restart gateway buttons** ([#13526](https://github.com/NousResearch/hermes-agent/pull/13526), @austinpickett)
|
||||
- **Real API call count per session** (salvages #10140) ([#14004](https://github.com/NousResearch/hermes-agent/pull/14004))
|
||||
|
||||
---
|
||||
|
||||
## 🖱️ CLI & User Experience
|
||||
|
||||
- **Dynamic shell completion for bash, zsh, and fish** ([#9785](https://github.com/NousResearch/hermes-agent/pull/9785))
|
||||
- **Light-mode skins + skin-aware completion menus** ([#9461](https://github.com/NousResearch/hermes-agent/pull/9461))
|
||||
- **Numbered keyboard shortcuts** on approval and clarify prompts ([#13416](https://github.com/NousResearch/hermes-agent/pull/13416))
|
||||
- **Markdown stripping, compact multiline previews, external editor** ([#12934](https://github.com/NousResearch/hermes-agent/pull/12934))
|
||||
- **`--ignore-user-config` and `--ignore-rules` flags** (port codex#18646) ([#14277](https://github.com/NousResearch/hermes-agent/pull/14277))
|
||||
- **Account limits section in `/usage`** ([#13428](https://github.com/NousResearch/hermes-agent/pull/13428))
|
||||
- **Doctor: Command Installation check** for `hermes` bin symlink ([#10112](https://github.com/NousResearch/hermes-agent/pull/10112))
|
||||
- **ESC cancels secret/sudo prompts**, clearer skip messaging ([#9902](https://github.com/NousResearch/hermes-agent/pull/9902))
|
||||
- Fix: agent-facing text uses `display_hermes_home()` instead of hardcoded `~/.hermes` ([#10285](https://github.com/NousResearch/hermes-agent/pull/10285))
|
||||
- Fix: enforce `config.yaml` as sole CWD source + deprecate `.env` CWD vars + add `hermes memory reset` ([#11029](https://github.com/NousResearch/hermes-agent/pull/11029))
|
||||
|
||||
---
|
||||
|
||||
## 🔒 Security & Reliability
|
||||
|
||||
- **Global toggle to allow private/internal URL resolution** ([#14166](https://github.com/NousResearch/hermes-agent/pull/14166))
|
||||
- **Block agent from self-destructing the gateway** via terminal (closes #6666) ([#9895](https://github.com/NousResearch/hermes-agent/pull/9895))
|
||||
- **Telegram callback authorization** on update prompts ([#10536](https://github.com/NousResearch/hermes-agent/pull/10536))
|
||||
- **SECURITY.md** added ([#10532](https://github.com/NousResearch/hermes-agent/pull/10532), @I3eg1nner)
|
||||
- **Warn about legacy hermes.service units** during `hermes update` ([#11918](https://github.com/NousResearch/hermes-agent/pull/11918))
|
||||
- **Complete ASCII-locale UnicodeEncodeError recovery** for `api_messages`/`reasoning_content` (closes #6843) ([#10537](https://github.com/NousResearch/hermes-agent/pull/10537))
|
||||
- **Prevent stale `os.environ` leak** after `clear_session_vars` ([#10527](https://github.com/NousResearch/hermes-agent/pull/10527))
|
||||
- **Prevent agent hang when backgrounding processes** via terminal tool ([#10584](https://github.com/NousResearch/hermes-agent/pull/10584))
|
||||
- Many smaller session-resume, interrupt, streaming, and memory-race fixes throughout the window
|
||||
|
||||
---
|
||||
|
||||
## 🐛 Notable Bug Fixes
|
||||
|
||||
The `fix:` category in this window covers 482 PRs. Highlights:
|
||||
|
||||
- Streaming cursor artifacts filtered from Matrix, Telegram, WhatsApp, Discord (multiple PRs)
|
||||
- `<think>` and `<thought>` blocks filtered from gateway stream consumers ([#9408](https://github.com/NousResearch/hermes-agent/pull/9408))
|
||||
- Gateway display.streaming root-config override regression ([#9799](https://github.com/NousResearch/hermes-agent/pull/9799))
|
||||
- Context `session_search` coerces limit to int (prevents TypeError) ([#10522](https://github.com/NousResearch/hermes-agent/pull/10522))
|
||||
- Memory tool stays available when `fcntl` is unavailable (Windows) ([#9783](https://github.com/NousResearch/hermes-agent/pull/9783))
|
||||
- Trajectory compressor credentials load from `HERMES_HOME/.env` ([#9632](https://github.com/NousResearch/hermes-agent/pull/9632), @Dusk1e)
|
||||
- `@_context_completions` no longer crashes on `@` mention ([#9683](https://github.com/NousResearch/hermes-agent/pull/9683), @kshitijk4poor)
|
||||
- Group session `user_id` no longer treated as `thread_id` in shutdown notifications ([#10546](https://github.com/NousResearch/hermes-agent/pull/10546))
|
||||
- Telegram `platform_hint` — markdown is supported (closes #8261) ([#10612](https://github.com/NousResearch/hermes-agent/pull/10612))
|
||||
- Doctor checks for Kimi China credentials fixed
|
||||
- Streaming: don't suppress final response when commentary message is sent ([#10540](https://github.com/NousResearch/hermes-agent/pull/10540))
|
||||
- Rapid Telegram follow-ups no longer get cut off
|
||||
|
||||
---
|
||||
|
||||
## 🧪 Testing & CI
|
||||
|
||||
- **Contributor attribution CI check** on PRs ([#9376](https://github.com/NousResearch/hermes-agent/pull/9376))
|
||||
- Hermetic test parity (`scripts/run_tests.sh`) held across this window
|
||||
- Test count stabilized post-Transport refactor; CI matrix held green through the transport rollout
|
||||
|
||||
---
|
||||
|
||||
## 📚 Documentation
|
||||
|
||||
- Atropos + wandb links in user guide
|
||||
- ACP / VS Code / Zed / JetBrains integration docs refresh
|
||||
- Webhook subscription docs updated for direct-delivery mode
|
||||
- Plugin author guide expanded for new hooks (`register_command`, `dispatch_tool`, `transform_tool_result`)
|
||||
- Transport layer developer guide added
|
||||
- Website: removed the Discussions link from the README
|
||||
|
||||
---
|
||||
|
||||
## 👥 Contributors
|
||||
|
||||
### Core
|
||||
- **@teknium1** (Teknium)
|
||||
|
||||
### Top Community Contributors (by merged PR count)
|
||||
- **@kshitijk4poor** — 49 PRs · Transport refactor (AnthropicTransport, ResponsesApiTransport), Step Plan provider, Xiaomi MiMo v2.5 support, numerous gateway fixes, promoted Kimi K2.5, @ mention crash fix
|
||||
- **@OutThisLife** (Brooklyn) — 31 PRs · TUI polish, git branch in status bar, per-turn stopwatch, stable picker keys, `/clear` confirm, light-theme preset, subagent spawn observability overlay
|
||||
- **@helix4u** — 11 PRs · Voice CLI record beep, MCP tool interrupt handling, assorted stability fixes
|
||||
- **@austinpickett** — 8 PRs · Dashboard react-router + sidebar + sticky header + dropdown, Vercel deployment, update + restart buttons
|
||||
- **@alt-glitch** — 8 PRs · PLATFORM_HINTS for Matrix/Mattermost/Feishu, Matrix fixes
|
||||
- **@ethernet8023** — 3 PRs
|
||||
- **@benbarclay** — 3 PRs
|
||||
- **@Aslaaen** — 2 PRs
|
||||
|
||||
### Also contributing
|
||||
@jerilynzheng (ai-gateway pricing), @JimLiu (baoyu-comic skill), @Dusk1e (trajectory compressor credentials), @DeployFaith (mobile-responsive dashboard), @LeonSGP43, @v1k22 (concept-diagrams), @omnissiah-comelse (adversarial-ux-test), @coekfung (Telegram MarkdownV2 expandable blockquotes), @liftaris (TUI provider resolution), @arihantsethia (skill analytics dashboard), @topcheer + @xing8star (QQBot foundation), @kovyrin, @I3eg1nner (SECURITY.md), @PeterBerthelsen, @lengxii, @priveperfumes, @sjz-ks, @cuyua9, @Disaster-Terminator, @leozeli, @LehaoLin, @trevthefoolish, @loongfay, @MrNiceRicee, @WideLee, @bluefishs, @malaiwah, @bobashopcashier, @dsocolobsky, @iamagenius00, @IAvecilla, @aniruddhaadak80, @Es1la, @asheriif, @walli, @jquesnelle (original Tool Gateway work).
|
||||
|
||||
### All Contributors (alphabetical)
|
||||
|
||||
@0xyg3n, @10ishq, @A-afflatus, @Abnertheforeman, @admin28980, @adybag14-cyber, @akhater, @alexzhu0,
|
||||
@AllardQuek, @alt-glitch, @aniruddhaadak80, @anna-oake, @anniesurla, @anthhub, @areu01or00, @arihantsethia,
|
||||
@arthurbr11, @asheriif, @Aslaaen, @Asunfly, @austinpickett, @AviArora02-commits, @AxDSan, @azhengbot, @Bartok9,
|
||||
@benbarclay, @bennytimz, @bernylinville, @bingo906, @binhnt92, @bkadish, @bluefishs, @bobashopcashier,
|
||||
@brantzh6, @BrennerSpear, @brianclemens, @briandevans, @brooklynnicholson, @bugkill3r, @buray, @burtenshaw,
|
||||
@cdanis, @cgarwood82, @ChimingLiu, @chongweiliu, @christopherwoodall, @coekfung, @cola-runner, @corazzione,
|
||||
@counterposition, @cresslank, @cuyua9, @cypres0099, @danieldoderlein, @davetist, @davidvv, @DeployFaith,
|
||||
@Dev-Mriganka, @devorun, @dieutx, @Disaster-Terminator, @dodo-reach, @draix, @DrStrangerUJN, @dsocolobsky,
|
||||
@Dusk1e, @dyxushuai, @elkimek, @elmatadorgh, @emozilla, @entropidelic, @Erosika, @erosika, @Es1la, @etcircle,
|
||||
@etherman-os, @ethernet8023, @fancydirty, @farion1231, @fatinghenji, @Fatty911, @fengtianyu88, @Feranmi10,
|
||||
@flobo3, @francip, @fuleinist, @g-guthrie, @GenKoKo, @gianfrancopiana, @gnanam1990, @GuyCui, @haileymarshall,
|
||||
@haimu0x, @handsdiff, @hansnow, @hedgeho9X, @helix4u, @hengm3467, @HenkDz, @heykb, @hharry11, @HiddenPuppy,
|
||||
@honghua, @houko, @houziershi, @hsy5571616, @huangke19, @hxp-plus, @Hypn0sis, @I3eg1nner, @iacker,
|
||||
@iamagenius00, @IAvecilla, @iborazzi, @Ifkellx, @ifrederico, @imink, @isaachuangGMICLOUD, @ismell0992-afk,
|
||||
@j0sephz, @Jaaneek, @jackjin1997, @JackTheGit, @jaffarkeikei, @jerilynzheng, @JiaDe-Wu, @Jiawen-lee, @JimLiu,
|
||||
@jinzheng8115, @jneeee, @jplew, @jquesnelle, @Julientalbot, @Junass1, @jvcl, @kagura-agent, @keifergu,
|
||||
@kevinskysunny, @keyuyuan, @konsisumer, @kovyrin, @kshitijk4poor, @leeyang1990, @LehaoLin, @lengxii,
|
||||
@LeonSGP43, @leozeli, @li0near, @liftaris, @Lind3ey, @Linux2010, @liujinkun2025, @LLQWQ, @Llugaes, @lmoncany,
|
||||
@longsizhuo, @lrawnsley, @Lubrsy706, @lumenradley, @luyao618, @lvnilesh, @LVT382009, @m0n5t3r, @Magaav,
|
||||
@MagicRay1217, @malaiwah, @manuelschipper, @Marvae, @MassiveMassimo, @mavrickdeveloper, @maxchernin, @memosr,
|
||||
@meng93, @mengjian-github, @MestreY0d4-Uninter, @Mibayy, @MikeFac, @mikewaters, @milkoor, @minorgod,
|
||||
@MrNiceRicee, @ms-alan, @mvanhorn, @n-WN, @N0nb0at, @Nan93, @NIDNASSER-Abdelmajid, @nish3451, @niyoh120,
|
||||
@nocoo, @nosleepcassette, @NousResearch, @ogzerber, @omnissiah-comelse, @Only-Code-A, @opriz, @OwenYWT, @pedh,
|
||||
@pefontana, @PeterBerthelsen, @phpoh, @pinion05, @plgonzalezrx8, @pradeep7127, @priveperfumes,
|
||||
@projectadmin-dev, @PStarH, @rnijhara, @Roy-oss1, @roytian1217, @RucchiZ, @Ruzzgar, @RyanLee-Dev, @Salt-555,
|
||||
@Sanjays2402, @sgaofen, @sharziki, @shenuu, @shin4, @SHL0MS, @shushuzn, @sicnuyudidi, @simon-gtcl,
|
||||
@simon-marcus, @sirEven, @Sisyphus, @sjz-ks, @snreynolds, @Societus, @Somme4096, @sontianye, @sprmn24,
|
||||
@StefanIsMe, @stephenschoettler, @Swift42, @taeng0204, @taeuk178, @tannerfokkens-maker, @TaroballzChen,
|
||||
@ten-ltw, @teyrebaz33, @Tianworld, @topcheer, @Tranquil-Flow, @trevthefoolish, @TroyMitchell911, @UNLINEARITY,
|
||||
@v1k22, @vivganes, @vominh1919, @vrinek, @VTRiot, @WadydX, @walli, @wenhao7, @WhiteWorld, @WideLee, @wujhsu,
|
||||
@WuTianyi123, @Wysie, @xandersbell, @xiaoqiang243, @xiayh0107, @xinpengdr, @Xowiek, @ycbai, @yeyitech, @ygd58,
|
||||
@youngDoo, @yudaiyan, @Yukipukii1, @yule975, @yyq4193, @yzx9, @ZaynJarvis, @zhang9w0v5, @zhanggttry,
|
||||
@zhangxicen, @zhongyueming1121, @zhouxiaoya12, @zons-zhaozhy
|
||||
|
||||
Also: @maelrx, @Marco Rutsch, @MaxsolcuCrypto, @Mind-Dragon, @Paul Bergeron, @say8hi, @whitehatjr1001.
|
||||
|
||||
|
||||
---
|
||||
|
||||
**Full Changelog**: [v2026.4.13...v2026.4.23](https://github.com/NousResearch/hermes-agent/compare/v2026.4.13...v2026.4.23)
|
||||
@@ -1,505 +0,0 @@
|
||||
# Hermes Agent v0.12.0 (v2026.4.30)
|
||||
|
||||
**Release Date:** April 30, 2026
|
||||
**Since v0.11.0:** 1,096 commits · 550 merged PRs · 1,270 files changed · 217,776 insertions · 213 community contributors (including co-authors)
|
||||
|
||||
> The Curator release — Hermes Agent now maintains itself. An autonomous background Curator grades, prunes, and consolidates your skill library on its own schedule. The self-improvement loop that reviews what to save got a substantial upgrade. Four new inference providers, an 18th messaging platform, a 19th via the Teams plugin, native Spotify + Google Meet integrations, ComfyUI and TouchDesigner-MCP moved from optional to bundled-by-default, and a ~57% cut to visible TUI cold start.
|
||||
|
||||
---
|
||||
|
||||
## ✨ Highlights
|
||||
|
||||
- **Autonomous Curator** — `hermes curator` runs as a background agent on the gateway's cron ticker (7-day cycle default). It grades your skill library, consolidates related skills, prunes dead ones, and writes per-run reports to `logs/curator/run.json` + `REPORT.md`. Archived skills are classified consolidated-vs-pruned via model + heuristic. Defense-in-depth gates protect bundled/hub skills from mutation. Unified under `auxiliary.curator` — pick the curator's model in `hermes model`, manage it from the dashboard. `hermes curator status` ranks skills by usage (most-used / least-used). ([#17277](https://github.com/NousResearch/hermes-agent/pull/17277), [#17307](https://github.com/NousResearch/hermes-agent/pull/17307), [#17941](https://github.com/NousResearch/hermes-agent/pull/17941), [#17868](https://github.com/NousResearch/hermes-agent/pull/17868), [#18033](https://github.com/NousResearch/hermes-agent/pull/18033))
|
||||
|
||||
- **Self-improvement loop — substantially upgraded** — The background review fork (the core of Hermes' self-improvement: after each turn it decides what memories/skills to save or update) is now class-first (rubric-based rather than free-form), active-update biased (prefers the skill the agent just loaded), handles `references/`/`templates/` sub-files, and properly inherits the parent's live runtime (provider, model, credentials actually propagate). Restricted to memory + skills toolsets so it can't sprawl. Memory providers shut down cleanly. Prior-turn tool messages excluded from the summary so the fork sees a clean context. ([#16026](https://github.com/NousResearch/hermes-agent/pull/16026), [#17213](https://github.com/NousResearch/hermes-agent/pull/17213), [#16099](https://github.com/NousResearch/hermes-agent/pull/16099), [#16569](https://github.com/NousResearch/hermes-agent/pull/16569), [#16204](https://github.com/NousResearch/hermes-agent/pull/16204), [#15057](https://github.com/NousResearch/hermes-agent/pull/15057))
|
||||
|
||||
- **Skill integrations — major expansion** — **ComfyUI v5** with official CLI + REST + hardware-gated local install, moved from optional to **built-in by default** ([#17610](https://github.com/NousResearch/hermes-agent/pull/17610), [#17631](https://github.com/NousResearch/hermes-agent/pull/17631), [#17734](https://github.com/NousResearch/hermes-agent/pull/17734)). **TouchDesigner-MCP** bundled by default, expanded with GLSL, post-FX, audio, geometry, and 9 new reference docs ([#16753](https://github.com/NousResearch/hermes-agent/pull/16753), [#16624](https://github.com/NousResearch/hermes-agent/pull/16624), [#16768](https://github.com/NousResearch/hermes-agent/pull/16768) — @kshitijk4poor + @SHL0MS). **Humanizer** skill ports a text-cleaner that strips AI-isms ([#16787](https://github.com/NousResearch/hermes-agent/pull/16787)). **claude-design** HTML artifact skill + design-md (Google DESIGN.md spec) + airtable salvage + `skill_manage` edits in `external_dirs` + direct-URL skill install + `/reload-skills` slash command. ([#16358](https://github.com/NousResearch/hermes-agent/pull/16358), [#14876](https://github.com/NousResearch/hermes-agent/pull/14876), [#16291](https://github.com/NousResearch/hermes-agent/pull/16291), [#17512](https://github.com/NousResearch/hermes-agent/pull/17512), [#16323](https://github.com/NousResearch/hermes-agent/pull/16323), [#17744](https://github.com/NousResearch/hermes-agent/pull/17744))
|
||||
|
||||
- **LM Studio — first-class provider** — upgraded from a custom-endpoint alias to a full-blown native provider: dedicated auth, `hermes doctor` checks, reasoning transport, live `/models` listing. (Salvage of @kshitijk4poor's #17061.) ([#17102](https://github.com/NousResearch/hermes-agent/pull/17102))
|
||||
|
||||
- **Four more inference providers** — **GMI Cloud** (first-class, salvage of #11955 — @isaachuangGMICLOUD), **Azure AI Foundry** with auto-detection, **MiniMax OAuth** with PKCE browser flow (salvage #15203), **Tencent Tokenhub** (salvage of #16860). ([#16663](https://github.com/NousResearch/hermes-agent/pull/16663), [#15845](https://github.com/NousResearch/hermes-agent/pull/15845), [#17524](https://github.com/NousResearch/hermes-agent/pull/17524), [#16960](https://github.com/NousResearch/hermes-agent/pull/16960))
|
||||
|
||||
- **Pluggable gateway platforms + Microsoft Teams** — the gateway is now a plugin host. Drop-in messaging adapters live outside the core, and Microsoft Teams is the first plugin-shipped platform. (Salvage of #17664.) ([#17751](https://github.com/NousResearch/hermes-agent/pull/17751), [#17828](https://github.com/NousResearch/hermes-agent/pull/17828))
|
||||
|
||||
- **Tencent 元宝 (Yuanbao) — 18th messaging platform** — native gateway adapter with text + media delivery. ([#16298](https://github.com/NousResearch/hermes-agent/pull/16298), [#17424](https://github.com/NousResearch/hermes-agent/pull/17424))
|
||||
|
||||
- **Spotify — native tools + bundled skill + wizard** — 7 tools (play, search, queue, playlists, devices) behind PKCE OAuth, interactive setup wizard, bundled skill, surfacing in `hermes tools`, cron usage documented. ([#15121](https://github.com/NousResearch/hermes-agent/pull/15121), [#15130](https://github.com/NousResearch/hermes-agent/pull/15130), [#15154](https://github.com/NousResearch/hermes-agent/pull/15154), [#15180](https://github.com/NousResearch/hermes-agent/pull/15180))
|
||||
|
||||
- **Google Meet plugin** — join calls, transcribe, speak, follow up. Realtime OpenAI transport + Node bot server, full pipeline bundled as a plugin. ([#16364](https://github.com/NousResearch/hermes-agent/pull/16364))
|
||||
|
||||
- **`hermes -z` one-shot mode + `hermes update --check`** — non-interactive `hermes -z <prompt>` with `--model`/`--provider`/`HERMES_INFERENCE_MODEL`. `hermes update --check` preflight. Opt-in pre-update HERMES_HOME backup. See the usage sketch after this list. ([#15702](https://github.com/NousResearch/hermes-agent/pull/15702), [#15704](https://github.com/NousResearch/hermes-agent/pull/15704), [#15841](https://github.com/NousResearch/hermes-agent/pull/15841), [#16539](https://github.com/NousResearch/hermes-agent/pull/16539), [#16566](https://github.com/NousResearch/hermes-agent/pull/16566))
|
||||
|
||||
- **Models dashboard tab + in-browser model config** — rich per-model analytics, switch main + auxiliary models from the dashboard. ([#17745](https://github.com/NousResearch/hermes-agent/pull/17745), [#17802](https://github.com/NousResearch/hermes-agent/pull/17802))
|
||||
|
||||
- **Remote model catalog manifest** — OpenRouter + Nous Portal model catalogs are now pulled from a remote manifest so new models show up without a release. ([#16033](https://github.com/NousResearch/hermes-agent/pull/16033))
|
||||
|
||||
- **Native multimodal image routing** — images now route based on the model's actual vision capability rather than provider defaults. ([#16506](https://github.com/NousResearch/hermes-agent/pull/16506))
|
||||
|
||||
- **Gateway media parity** — native multi-image sending across Telegram, Discord, Slack, Mattermost, Email, and Signal; centralized audio routing with FLAC support + Telegram document fallback. ([#17909](https://github.com/NousResearch/hermes-agent/pull/17909), [#17833](https://github.com/NousResearch/hermes-agent/pull/17833))
|
||||
|
||||
- **TUI catches up to (and past) the classic CLI** — LaTeX rendering (@austinpickett), `/reload` .env hot-reload, pluggable busy-indicator styles (@OutThisLife, #13610), opt-in auto-resume of last session, expanded light-terminal auto-detection, session delete from `/resume` picker with `d`, modified mouse-wheel line scroll, and a `/mouse` toggle that kills ConPTY's phantom mouse injection (@kevin-ho). ([#17175](https://github.com/NousResearch/hermes-agent/pull/17175), [#17286](https://github.com/NousResearch/hermes-agent/pull/17286), [#17150](https://github.com/NousResearch/hermes-agent/pull/17150), [#17130](https://github.com/NousResearch/hermes-agent/pull/17130), [#17113](https://github.com/NousResearch/hermes-agent/pull/17113), [#17668](https://github.com/NousResearch/hermes-agent/pull/17668), [#17669](https://github.com/NousResearch/hermes-agent/pull/17669), [#15488](https://github.com/NousResearch/hermes-agent/pull/15488))
|
||||
|
||||
- **Observability + achievements plugins** — bundled Langfuse observability plugin (salvage #16845) + bundled hermes-achievements plugin that scans full session history. ([#16917](https://github.com/NousResearch/hermes-agent/pull/16917), [#17754](https://github.com/NousResearch/hermes-agent/pull/17754))
|
||||
|
||||
- **TTS provider registry + Piper local TTS** — pluggable `tts.providers.<name>` registry; Piper ships as a native local TTS provider. (Closes #8508.) ([#17843](https://github.com/NousResearch/hermes-agent/pull/17843), [#17885](https://github.com/NousResearch/hermes-agent/pull/17885))
|
||||
|
||||
- **Vercel Sandbox backend** — Vercel sandboxes as an execute_code/terminal backend (@kshitijk4poor). ([#17445](https://github.com/NousResearch/hermes-agent/pull/17445))
|
||||
|
||||
- **Secret redaction off by default** — default flipped to off. Prevents the long-standing patch-corruption incidents where fake secret-shaped substrings mangled tool outputs. Opt in via `redaction.enabled: true` when you need it. ([#16794](https://github.com/NousResearch/hermes-agent/pull/16794))
|
||||
|
||||
- **Cold-start performance** — visible TUI cold start cut **~57%** via lazy agent init (@OutThisLife), lazy imports of OpenAI / Anthropic / Firecrawl / account_usage, mtime-cached `load_config()`, memoized `get_tool_definitions()` with TTL-cached `check_fn` results, precompiled dangerous-command patterns. ([#17190](https://github.com/NousResearch/hermes-agent/pull/17190), [#17046](https://github.com/NousResearch/hermes-agent/pull/17046), [#17041](https://github.com/NousResearch/hermes-agent/pull/17041), [#17098](https://github.com/NousResearch/hermes-agent/pull/17098), [#17206](https://github.com/NousResearch/hermes-agent/pull/17206))
- **Configurable prompt cache TTL** — `prompt_caching.cache_ttl` (5m default, 1h opt-in — cost savings for bursty sessions that keep cache warm). Salvage of #12659. ([#15065](https://github.com/NousResearch/hermes-agent/pull/15065))
---
## 🧠 Autonomous Curator & Self-Improvement Loop
|
||||
|
||||
### Curator — autonomous skill maintenance
|
||||
- **`hermes curator` as a background agent** — runs on the gateway's cron ticker, 7-day cycle by default, umbrella-first prompt, inherits parent config, unbounded iterations ([#17277](https://github.com/NousResearch/hermes-agent/pull/17277) — issue #7816)
|
||||
- **Per-run reports** — `logs/curator/run.json` + `REPORT.md` per cycle ([#17307](https://github.com/NousResearch/hermes-agent/pull/17307))
|
||||
- **Consolidated vs pruned classification** — archived skills split with model + heuristic ([#17941](https://github.com/NousResearch/hermes-agent/pull/17941))
|
||||
- **`hermes curator status`** — ranks skills by usage, shows most-used and least-used ([#18033](https://github.com/NousResearch/hermes-agent/pull/18033))
|
||||
- **Unified under `auxiliary.curator`** — pick the model in `hermes model`, configure from the dashboard ([#17868](https://github.com/NousResearch/hermes-agent/pull/17868))
|
||||
- **Documentation** — dedicated curator feature page on the docs site ([#17563](https://github.com/NousResearch/hermes-agent/pull/17563))
|
||||
- Fix: seed defaults on update, create `logs/curator/` directory, defer fire import ([#17927](https://github.com/NousResearch/hermes-agent/pull/17927))
|
||||
- Fix: scan nested archive subdirs in `restore_skill` (@0xDevNinja) ([#17951](https://github.com/NousResearch/hermes-agent/pull/17951))
|
||||
- Fix: use actual skill activity in curator status (@y0shua1ee) ([#17953](https://github.com/NousResearch/hermes-agent/pull/17953))
|
||||
- Fix: `skill_manage` refuses writes on pinned skills; pinning now blocks curator writes ([#17562](https://github.com/NousResearch/hermes-agent/pull/17562), [#17578](https://github.com/NousResearch/hermes-agent/pull/17578))
|
||||
- Fix: `bump_use()` wired into skill invocation + preload + skill_view (salvage #17782) ([#17932](https://github.com/NousResearch/hermes-agent/pull/17932))
|
||||
|
||||
### Self-improvement loop (background review fork)
|
||||
- **Class-first skill-review prompt** — rubric-based grading rather than free-form "should this update" ([#16026](https://github.com/NousResearch/hermes-agent/pull/16026))
|
||||
- **Active-update bias** — prefers updating skills the agent just loaded, handles `references/` + `templates/` sub-files ([#17213](https://github.com/NousResearch/hermes-agent/pull/17213))
|
||||
- **Fork inherits parent's live runtime** — provider, model, credentials actually propagate now ([#16099](https://github.com/NousResearch/hermes-agent/pull/16099))
|
||||
- **Scoped toolsets** — review fork restricted to memory + skills (no shell, no web) ([#16569](https://github.com/NousResearch/hermes-agent/pull/16569))
|
||||
- **Clean shutdown** — background review memory providers exit properly (salvage #15289) ([#16204](https://github.com/NousResearch/hermes-agent/pull/16204))
|
||||
- **Clean context** — prior-history tool messages excluded from review summary (salvage #14967) ([#15057](https://github.com/NousResearch/hermes-agent/pull/15057))
|
||||
|
||||
---
|
||||
|
||||
## 🧩 Skills Ecosystem
|
||||
|
||||
### Skill integrations — newly bundled or promoted
|
||||
- **ComfyUI v5** — official CLI + REST + hardware-gated local install; **moved from optional to built-in** ([#17610](https://github.com/NousResearch/hermes-agent/pull/17610), [#17631](https://github.com/NousResearch/hermes-agent/pull/17631), [#17734](https://github.com/NousResearch/hermes-agent/pull/17734), [#17612](https://github.com/NousResearch/hermes-agent/pull/17612))
|
||||
- **TouchDesigner-MCP** — **bundled by default** ([#16753](https://github.com/NousResearch/hermes-agent/pull/16753) — @kshitijk4poor), expanded with GLSL, post-FX, audio, geometry references ([#16624](https://github.com/NousResearch/hermes-agent/pull/16624)), 9 new reference docs ([#16768](https://github.com/NousResearch/hermes-agent/pull/16768) — @SHL0MS)
|
||||
- **Humanizer** — strips AI-isms from text ([#16787](https://github.com/NousResearch/hermes-agent/pull/16787))
|
||||
- **claude-design** — HTML artifact skill with disambiguation from other design skills ([#16358](https://github.com/NousResearch/hermes-agent/pull/16358))
|
||||
- **design-md** — Google's DESIGN.md spec skill ([#14876](https://github.com/NousResearch/hermes-agent/pull/14876))
|
||||
- **airtable** — salvaged skill + skill API keys wired into `.env` (#15838) ([#16291](https://github.com/NousResearch/hermes-agent/pull/16291))
|
||||
- **pretext** — creative browser demos with @chenglou/pretext ([#17259](https://github.com/NousResearch/hermes-agent/pull/17259))
|
||||
- **spike** + **sketch** — throwaway experiments + HTML mockups, adapted from gsd-build ([#17421](https://github.com/NousResearch/hermes-agent/pull/17421))
|
||||
|
||||
### Skills UX
|
||||
- **Install skills from a direct HTTP(S) URL** — `hermes skills install <url>` ([#16323](https://github.com/NousResearch/hermes-agent/pull/16323))
|
||||
- **`/reload-skills`** slash command (salvage #17670) ([#17744](https://github.com/NousResearch/hermes-agent/pull/17744))
|
||||
- **`hermes skills list`** shows enabled/disabled status ([#16129](https://github.com/NousResearch/hermes-agent/pull/16129))
|
||||
- **`skill_manage` refuses writes on pinned skills** ([#17562](https://github.com/NousResearch/hermes-agent/pull/17562))
|
||||
- **`skill_manage` edits external_dirs skills in place** (salvage #9966) ([#17512](https://github.com/NousResearch/hermes-agent/pull/17512), [#17289](https://github.com/NousResearch/hermes-agent/pull/17289))
|
||||
- Fix: inline-shell rendering in `skill_view` ([#15376](https://github.com/NousResearch/hermes-agent/pull/15376))
|
||||
- Fix: exclude `.archive/` from skill index walk (salvage #17639) ([#17931](https://github.com/NousResearch/hermes-agent/pull/17931))
|
||||
- Fix: dedicated docs page per bundled + optional skill ([#14929](https://github.com/NousResearch/hermes-agent/pull/14929))
|
||||
- Fix: `google-workspace` shared HERMES_HOME helper + ship deps as optional extra ([#15405](https://github.com/NousResearch/hermes-agent/pull/15405))
|
||||
- Fix: auto-wrap ASCII-art code blocks in generated skill pages ([#16497](https://github.com/NousResearch/hermes-agent/pull/16497))
|
||||
- Point agent at `hermes-agent` skill + docs site for Hermes questions ([#16535](https://github.com/NousResearch/hermes-agent/pull/16535))
|
||||
|
||||
---
|
||||
|
||||
## 🏗️ Core Agent & Architecture
|
||||
|
||||
### Provider & Model Support
|
||||
|
||||
#### New providers
|
||||
- **GMI Cloud** — first-class API-key provider on par with Arcee/Kilocode/Xiaomi (salvage of #11955 — @isaachuangGMICLOUD) ([#16663](https://github.com/NousResearch/hermes-agent/pull/16663))
|
||||
- **Azure AI Foundry** — auto-detection, full wiring ([#15845](https://github.com/NousResearch/hermes-agent/pull/15845))
|
||||
- **LM Studio** — upgraded from custom-endpoint alias to first-class provider: dedicated auth, doctor checks, reasoning transport, live `/models` (salvage of #17061 — @kshitijk4poor) ([#17102](https://github.com/NousResearch/hermes-agent/pull/17102))
|
||||
- **MiniMax OAuth** — PKCE browser flow with full OAuth integration (salvage #15203) ([#17524](https://github.com/NousResearch/hermes-agent/pull/17524))
|
||||
- **Tencent Tokenhub** — new provider (salvage of #16860) ([#16960](https://github.com/NousResearch/hermes-agent/pull/16960))
|
||||
|
||||
#### Model catalog
|
||||
- **Remote model catalog manifest** — OpenRouter + Nous Portal catalogs pulled from remote manifest so new models show up without a release ([#16033](https://github.com/NousResearch/hermes-agent/pull/16033))
|
||||
- `openai/gpt-5.5` and `gpt-5.5-pro` added to OpenRouter + Nous Portal ([#15343](https://github.com/NousResearch/hermes-agent/pull/15343))
|
||||
- `deepseek-v4-pro` and `deepseek-v4-flash` added ([#14934](https://github.com/NousResearch/hermes-agent/pull/14934))
|
||||
- `qwen3.6-plus` added to Alibaba-supported models ([#16896](https://github.com/NousResearch/hermes-agent/pull/16896))
|
||||
- Gemini free-tier keys blocked at setup with 429 guidance surfacing ([#15100](https://github.com/NousResearch/hermes-agent/pull/15100))
|
||||
|
||||
#### Model configuration
- **Configurable `prompt_caching.cache_ttl`** — 5m default, 1h opt-in (salvage #12659; see the sketch after this list) ([#15065](https://github.com/NousResearch/hermes-agent/pull/15065))
- `/fast` whitelist broadened to all OpenAI + Anthropic models ([#16883](https://github.com/NousResearch/hermes-agent/pull/16883))
- `auxiliary.extra_body.reasoning` translates into Codex Responses API ([#17004](https://github.com/NousResearch/hermes-agent/pull/17004))
- `hermes fallback` command for managing fallback providers ([#16052](https://github.com/NousResearch/hermes-agent/pull/16052))
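
For orientation, a minimal sketch of the new cache knob, assuming a YAML-style config file: `prompt_caching` and `cache_ttl` are the names documented above, while the file location, value format, and everything around them are illustrative only.

```yaml
# Hypothetical config excerpt: only the key names come from the release note.
prompt_caching:
  cache_ttl: 1h   # default is 5m; the 1h opt-in keeps the provider-side cache warm across bursty sessions
```
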
### Agent Loop & Conversation
|
||||
- **Native multimodal image routing** — based on model vision capability, not provider defaults ([#16506](https://github.com/NousResearch/hermes-agent/pull/16506))
|
||||
- **Delegate `child_timeout_seconds` default bumped to 600s** ([#14809](https://github.com/NousResearch/hermes-agent/pull/14809))
|
||||
- **Diagnostic dump when subagent times out with 0 API calls** ([#15105](https://github.com/NousResearch/hermes-agent/pull/15105))
|
||||
- **Gateway busts cached agent on compression/context_length config edits** ([#17008](https://github.com/NousResearch/hermes-agent/pull/17008))
|
||||
- **Opt-in runtime-metadata footer on final replies** ([#17026](https://github.com/NousResearch/hermes-agent/pull/17026))
|
||||
- `/reload-mcp` awareness — rebuild cached agents + prompt-cache cost confirmation ([#17729](https://github.com/NousResearch/hermes-agent/pull/17729))
|
||||
- Fix: repair CamelCase + `_tool` suffix tool-call emissions ([#15124](https://github.com/NousResearch/hermes-agent/pull/15124))
|
||||
- Fix: retry on `json.JSONDecodeError` instead of treating as local validation error ([#15107](https://github.com/NousResearch/hermes-agent/pull/15107))
|
||||
- Fix: handle unescaped control chars in `tool_call.arguments` ([#15356](https://github.com/NousResearch/hermes-agent/pull/15356))
|
||||
- Fix: ordering fix in `_copy_reasoning_content_for_api` — cross-provider reasoning isolation (@Zjianru) ([#15749](https://github.com/NousResearch/hermes-agent/pull/15749))
|
||||
- Fix: inject empty `reasoning_content` for DeepSeek/Kimi `tool_calls` unconditionally (@Zjianru) ([#15762](https://github.com/NousResearch/hermes-agent/pull/15762))
|
||||
- Fix: persist streamed `reasoning_content` on assistant turns (#16844) ([#16892](https://github.com/NousResearch/hermes-agent/pull/16892))
|
||||
- Fix: cancel coroutine on timeout so worker thread exits; full traceback on tool failure ([#17428](https://github.com/NousResearch/hermes-agent/pull/17428))
|
||||
- Fix: isolate `get_tool_definitions` quiet_mode cache + dedup LCM injection (#17335) ([#17889](https://github.com/NousResearch/hermes-agent/pull/17889))
|
||||
- Fix: serialize concurrent `hermes_tools` RPC calls from `execute_code` (#17770) ([#17894](https://github.com/NousResearch/hermes-agent/pull/17894), [#17902](https://github.com/NousResearch/hermes-agent/pull/17902))
|
||||
- Fix: rename `[SYSTEM:` → `[IMPORTANT:` in all user-injected markers (dodges Azure content filter) ([#16114](https://github.com/NousResearch/hermes-agent/pull/16114))
|
||||
|
||||
### Compression
|
||||
- **Retry summary on main model for unknown errors before giving up** ([#16774](https://github.com/NousResearch/hermes-agent/pull/16774))
|
||||
- **Notify users when configured aux model fails even if main-model fallback recovers** ([#16775](https://github.com/NousResearch/hermes-agent/pull/16775))
|
||||
- `/compress` wrapped in `_busy_command` to block input during compression ([#15388](https://github.com/NousResearch/hermes-agent/pull/15388))
|
||||
- Fix: reserve system + tools headroom when aux binds threshold ([#15631](https://github.com/NousResearch/hermes-agent/pull/15631))
|
||||
- Fix: use text-char sum for multimodal token estimation in `_find_tail_cut_by_tokens` ([#16369](https://github.com/NousResearch/hermes-agent/pull/16369))
|
||||
|
||||
### Session, Memory & State
|
||||
- **Trigram FTS5 index for CJK search, replace LIKE fallback** (@alt-glitch) ([#16651](https://github.com/NousResearch/hermes-agent/pull/16651))
|
||||
- **Index `tool_name` + `tool_calls` in FTS5, with repair + migration** (salvages #16866) ([#16914](https://github.com/NousResearch/hermes-agent/pull/16914))
|
||||
- **Checkpoints: auto-prune orphan and stale shadow repos at startup** ([#16303](https://github.com/NousResearch/hermes-agent/pull/16303))
|
||||
- **Memory providers notified on mid-process session_id rotation** (#6672) ([#17409](https://github.com/NousResearch/hermes-agent/pull/17409))
|
||||
- Fix: quote underscored terms in FTS5 query sanitization ([#16915](https://github.com/NousResearch/hermes-agent/pull/16915))
|
||||
- Fix: resolve viking_read 500/412 on file URIs + pseudo-summary URIs (salvage #5886) ([#17869](https://github.com/NousResearch/hermes-agent/pull/17869))
|
||||
- Fix: skip external-provider sync on interrupted turns ([#15395](https://github.com/NousResearch/hermes-agent/pull/15395))
|
||||
- Fix: close embedded Hindsight async client cleanly (salvage #14605) ([#16209](https://github.com/NousResearch/hermes-agent/pull/16209))
|
||||
- Fix: pass session transcript to `shutdown_memory_provider` on gateway + CLI (#15165) ([#16571](https://github.com/NousResearch/hermes-agent/pull/16571))
|
||||
- Fix: write-origin metadata seam ([#15346](https://github.com/NousResearch/hermes-agent/pull/15346))
|
||||
- Fix: preserve symlinks during atomic file writes ([#16980](https://github.com/NousResearch/hermes-agent/pull/16980))
|
||||
- Refactor: remove `flush_memories` entirely ([#15696](https://github.com/NousResearch/hermes-agent/pull/15696))
|
||||
|
||||
### Auxiliary models
|
||||
- Fix: surface auxiliary failures in UI (previously silent) ([#15324](https://github.com/NousResearch/hermes-agent/pull/15324))
|
||||
- Fix: surface title-gen auxiliary failures instead of silently dropping ([#16371](https://github.com/NousResearch/hermes-agent/pull/16371))
|
||||
- Fix: generalize unsupported-parameter detector and harden `max_tokens` retry ([#15633](https://github.com/NousResearch/hermes-agent/pull/15633))
|
||||
|
||||
---
|
||||
|
||||
## 📱 Messaging Platforms (Gateway)
|
||||
|
||||
### New Platforms
|
||||
- **Microsoft Teams (19th platform)** — as a plugin, + xdist collision guard ([#17828](https://github.com/NousResearch/hermes-agent/pull/17828))
|
||||
- **Yuanbao (Tencent 元宝, 18th platform)** — native adapter with text + media delivery ([#16298](https://github.com/NousResearch/hermes-agent/pull/16298), [#17424](https://github.com/NousResearch/hermes-agent/pull/17424), [#16880](https://github.com/NousResearch/hermes-agent/pull/16880))
|
||||
|
||||
### Pluggable Gateway Platforms
|
||||
- **Drop-in messaging adapters** — the gateway is now a plugin host for platforms (salvage of #17664) ([#17751](https://github.com/NousResearch/hermes-agent/pull/17751))
|
||||
|
||||
### Telegram
|
||||
- **Chat allowlists for groups and forums** (@web3blind) ([#15027](https://github.com/NousResearch/hermes-agent/pull/15027))
|
||||
- **Send fresh finals for stale preview streams** (port openclaw#72038) ([#16261](https://github.com/NousResearch/hermes-agent/pull/16261))
|
||||
- **Render markdown tables as row-group bullets + prompt hint** ([#16997](https://github.com/NousResearch/hermes-agent/pull/16997))
|
||||
- Document fallback in centralized audio routing ([#17833](https://github.com/NousResearch/hermes-agent/pull/17833))
|
||||
- Native multi-image sending ([#17909](https://github.com/NousResearch/hermes-agent/pull/17909))
|
||||
|
||||
### Discord
|
||||
- **Opt-in toolsets + ID injection + tool split + Feishu wiring** (salvage #15457, #15458) ([#15610](https://github.com/NousResearch/hermes-agent/pull/15610), [#15613](https://github.com/NousResearch/hermes-agent/pull/15613))
|
||||
- Fix: coerce `limit` parameter to int before `min()` call ([#16319](https://github.com/NousResearch/hermes-agent/pull/16319))
|
||||
|
||||
### Slack
- **Register every gateway command as a native slash (Discord/Telegram parity)** ([#16164](https://github.com/NousResearch/hermes-agent/pull/16164))
- **`strict_mention` config** — prevents thread auto-engagement ([#16193](https://github.com/NousResearch/hermes-agent/pull/16193))
- **`channel_skill_bindings`** — bind specific skills to specific Slack channels ([#16283](https://github.com/NousResearch/hermes-agent/pull/16283))
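
A rough sketch of how the two new Slack keys could sit together in the gateway config; `strict_mention` and `channel_skill_bindings` are the documented names, while the `slack:` nesting, channel names, and skill choices are placeholders.

```yaml
# Hypothetical Slack excerpt: nesting and values are illustrative.
slack:
  strict_mention: true            # only engage when explicitly @-mentioned; no thread auto-engagement
  channel_skill_bindings:
    "#docs": [humanizer]          # bind specific skills to specific channels
    "#data": [airtable]
```
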
### Signal
|
||||
- **Native formatting** — markdown → bodyRanges, reply quotes, reactions ([#17417](https://github.com/NousResearch/hermes-agent/pull/17417))
|
||||
- Native multi-image sending ([#17909](https://github.com/NousResearch/hermes-agent/pull/17909))
|
||||
|
||||
### Feishu / Mattermost / Email / Signal
|
||||
- All participate in **native multi-image sending** ([#17909](https://github.com/NousResearch/hermes-agent/pull/17909))
|
||||
|
||||
### Gateway Core
|
||||
- **Centralized audio routing + FLAC support + Telegram doc fallback** ([#17833](https://github.com/NousResearch/hermes-agent/pull/17833))
|
||||
- **Native multi-image sending** across Telegram, Discord, Slack, Mattermost, Email, Signal ([#17909](https://github.com/NousResearch/hermes-agent/pull/17909))
|
||||
- **Make hygiene hard message limit configurable** ([#17000](https://github.com/NousResearch/hermes-agent/pull/17000))
|
||||
- **Opt-in runtime-metadata footer on final replies** ([#17026](https://github.com/NousResearch/hermes-agent/pull/17026))
|
||||
- **`pre_gateway_dispatch` hook** — plugins can intercept before dispatch ([#15050](https://github.com/NousResearch/hermes-agent/pull/15050))
|
||||
- **`pre_approval_request` / `post_approval_response` hooks** ([#16776](https://github.com/NousResearch/hermes-agent/pull/16776))
|
||||
- Fix: timeouts — guard `load_config()` call against runtime exceptions ([#16318](https://github.com/NousResearch/hermes-agent/pull/16318))
|
||||
- Fix: support passing handler tools via registry ([#15613](https://github.com/NousResearch/hermes-agent/pull/15613))
|
||||
|
||||
---
|
||||
|
||||
## 🔧 Tool System
|
||||
|
||||
### Plugin-first architecture
|
||||
- **Pluggable gateway platforms** — platforms can ship as plugins ([#17751](https://github.com/NousResearch/hermes-agent/pull/17751))
|
||||
- **Microsoft Teams as first plugin-shipped platform** ([#17828](https://github.com/NousResearch/hermes-agent/pull/17828))
|
||||
- **`pre_gateway_dispatch` hook** ([#15050](https://github.com/NousResearch/hermes-agent/pull/15050))
|
||||
- **`pre_approval_request` + `post_approval_response` hooks** ([#16776](https://github.com/NousResearch/hermes-agent/pull/16776))
|
||||
- **`duration_ms` on `post_tool_call`** (inspired by Claude Code 2.1.119) ([#15429](https://github.com/NousResearch/hermes-agent/pull/15429))
|
||||
- **Bundled plugins**: Spotify ([#15174](https://github.com/NousResearch/hermes-agent/pull/15174)), Google Meet ([#16364](https://github.com/NousResearch/hermes-agent/pull/16364)), Langfuse observability ([#16917](https://github.com/NousResearch/hermes-agent/pull/16917)), hermes-achievements ([#17754](https://github.com/NousResearch/hermes-agent/pull/17754))
|
||||
- **Page-scoped plugin slots for built-in dashboard pages** ([#15658](https://github.com/NousResearch/hermes-agent/pull/15658))
|
||||
- **Declarative plugin installation for NixOS module** (@alt-glitch) ([#15953](https://github.com/NousResearch/hermes-agent/pull/15953))
|
||||
|
||||
### Browser
|
||||
- **CDP supervisor** — dialog detection + response + cross-origin iframe eval ([#14540](https://github.com/NousResearch/hermes-agent/pull/14540))
|
||||
- **Auto-spawn local Chromium for LAN/localhost URLs** when cloud provider is configured ([#16136](https://github.com/NousResearch/hermes-agent/pull/16136))
|
||||
|
||||
### Execute code / Terminal
|
||||
- **Vercel Sandbox backend** for `execute_code` / terminal (@kshitijk4poor) ([#17445](https://github.com/NousResearch/hermes-agent/pull/17445))
|
||||
- **Collapse subagent `task_id`s to shared container** ([#16177](https://github.com/NousResearch/hermes-agent/pull/16177))
|
||||
- **Docker: run container as host user** to avoid root-owned bind mounts (@benbarclay) ([#17305](https://github.com/NousResearch/hermes-agent/pull/17305))
|
||||
- Fix: safely quote `~/` subpaths in wrapped `cd` commands ([#15394](https://github.com/NousResearch/hermes-agent/pull/15394))
|
||||
- Fix: close file descriptor in `LocalEnvironment._update_cwd` ([#17300](https://github.com/NousResearch/hermes-agent/pull/17300))
|
||||
- Fix: SSH — prevent tar from overwriting remote home dir permissions ([#17898](https://github.com/NousResearch/hermes-agent/pull/17898), [#17867](https://github.com/NousResearch/hermes-agent/pull/17867))
|
||||
|
||||
### Image generation
- See the Provider & Model Support section for updates; no new image-generation providers landed this window.
### TTS / Voice
- **Pluggable TTS provider registry** under `tts.providers.<name>` (see the sketch after this list) ([#17843](https://github.com/NousResearch/hermes-agent/pull/17843))
- **Piper** as native local TTS provider (closes #8508) ([#17885](https://github.com/NousResearch/hermes-agent/pull/17885))
- **Voice mode CLI parity in the TUI** — VAD loop + TTS + crash forensics ([#14810](https://github.com/NousResearch/hermes-agent/pull/14810))
- Fix: vision — use HERMES_HOME-based cache dir instead of cwd ([#17719](https://github.com/NousResearch/hermes-agent/pull/17719))
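
A sketch of the registry shape, assuming a YAML-style config: the `tts.providers.<name>` path is from the release note, while the provider-selection key and the Piper fields below are assumptions.

```yaml
# Hypothetical TTS excerpt: only the providers.<name> registry path is documented.
tts:
  provider: piper                 # assumed selector for the active provider
  providers:
    piper:
      voice: en_US-lessac-medium  # example Piper voice id; any locally installed voice would go here
```
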
### Cron
- **Honor `hermes tools` config for the cron platform** ([#14798](https://github.com/NousResearch/hermes-agent/pull/14798))
- **Per-job `workdir`** — project-aware cron runs ([#15110](https://github.com/NousResearch/hermes-agent/pull/15110))
- **`context_from` field** — chain cron job outputs (see the sketch after this list) ([#15606](https://github.com/NousResearch/hermes-agent/pull/15606))
- Fix: promote `croniter` to a core dependency ([#17577](https://github.com/NousResearch/hermes-agent/pull/17577))
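
A minimal sketch of `workdir` and `context_from` on two chained jobs; those two field names are documented above, while the job names, schedules, prompts, and the surrounding `cron.jobs` nesting are placeholders.

```yaml
# Hypothetical cron excerpt: only workdir and context_from are the documented fields.
cron:
  jobs:
    nightly-report:
      schedule: "0 2 * * *"
      workdir: ~/projects/reports     # per-job working directory for project-aware runs
      prompt: "Summarize yesterday's logs"
    morning-digest:
      schedule: "0 8 * * *"
      context_from: nightly-report    # chain this job onto the previous job's output
      prompt: "Turn the report into a short digest"
```
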
### Web search
|
||||
- **Expose `limit` for `web_search`** ([#16934](https://github.com/NousResearch/hermes-agent/pull/16934))
|
||||
|
||||
### Maps
|
||||
- Fix: include seconds in timezone UTC offset output ([#16300](https://github.com/NousResearch/hermes-agent/pull/16300))
|
||||
|
||||
### Approvals
|
||||
- **Hardline blocklist for unrecoverable commands** ([#15878](https://github.com/NousResearch/hermes-agent/pull/15878))
|
||||
- Perf: precompile DANGEROUS_PATTERNS and HARDLINE_PATTERNS ([#17206](https://github.com/NousResearch/hermes-agent/pull/17206))
|
||||
|
||||
### ACP
|
||||
- **Advertise and forward image prompts** ([#18030](https://github.com/NousResearch/hermes-agent/pull/18030))
|
||||
|
||||
### API Server
|
||||
- **POST `/v1/runs/{run_id}/stop`** (salvage of #15656) ([#15842](https://github.com/NousResearch/hermes-agent/pull/15842))
|
||||
- **Expose run status for external UIs** (#17085) ([#17458](https://github.com/NousResearch/hermes-agent/pull/17458))
|
||||
|
||||
### Nix
|
||||
- **Declarative plugin installation for NixOS module** (@alt-glitch) ([#15953](https://github.com/NousResearch/hermes-agent/pull/15953))
|
||||
- Fix: use `--rebuild` in fix-lockfiles to bypass cached FOD store paths ([#15444](https://github.com/NousResearch/hermes-agent/pull/15444))
|
||||
- Fix: `extraPackages` now actually works via per-user profile ([#17047](https://github.com/NousResearch/hermes-agent/pull/17047))
|
||||
- Fix: refresh web/ npm-deps hash to unblock main builds ([#17174](https://github.com/NousResearch/hermes-agent/pull/17174))
|
||||
- Fix: replace magic-nix-cache with Cachix ([#17928](https://github.com/NousResearch/hermes-agent/pull/17928))
|
||||
|
||||
---
|
||||
|
||||
## 🖥️ TUI
|
||||
|
||||
### New features
|
||||
- **LaTeX rendering** (@austinpickett) ([#17175](https://github.com/NousResearch/hermes-agent/pull/17175))
|
||||
- **`/reload` .env hot-reload** — ported from the classic CLI ([#17286](https://github.com/NousResearch/hermes-agent/pull/17286))
|
||||
- **Pluggable busy-indicator styles** (@OutThisLife, #13610) ([#17150](https://github.com/NousResearch/hermes-agent/pull/17150))
|
||||
- **Opt-in auto-resume of the most recent session** (@OutThisLife) ([#17130](https://github.com/NousResearch/hermes-agent/pull/17130))
|
||||
- **Expanded light-terminal auto-detection** — `HERMES_TUI_THEME` + background hex (@OutThisLife) ([#17113](https://github.com/NousResearch/hermes-agent/pull/17113))
|
||||
- **Delete sessions from `/resume` picker with `d`** (@OutThisLife) ([#17668](https://github.com/NousResearch/hermes-agent/pull/17668))
|
||||
- **Line-by-line scroll on modified mouse wheel** (@OutThisLife) ([#17669](https://github.com/NousResearch/hermes-agent/pull/17669))
|
||||
- **Delete queued message while editing with ctrl-x / cancel with esc** (@OutThisLife) ([#16707](https://github.com/NousResearch/hermes-agent/pull/16707))
|
||||
- **Per-section visibility for the details accordion** (@OutThisLife) ([#14968](https://github.com/NousResearch/hermes-agent/pull/14968))
|
||||
- **Voice mode CLI parity** — VAD loop + TTS + crash forensics ([#14810](https://github.com/NousResearch/hermes-agent/pull/14810))
|
||||
- **Contextual first-touch hints ported to TUI** — `/busy`, `/verbose` ([#16054](https://github.com/NousResearch/hermes-agent/pull/16054))
|
||||
- **Mini help menu on `?` in the input field** (@ethernet8023) ([#18043](https://github.com/NousResearch/hermes-agent/pull/18043))
|
||||
|
||||
### Fixes
|
||||
- Fix: proactive mouse disable on ConPTY + `/mouse` toggle command (@kevin-ho, WSL2 ghost-mouse fix) ([#15488](https://github.com/NousResearch/hermes-agent/pull/15488))
|
||||
- Fix: restore skills search RPC ([#15870](https://github.com/NousResearch/hermes-agent/pull/15870))
|
||||
- Perf: cache text measurements across yoga flex re-passes ([#14818](https://github.com/NousResearch/hermes-agent/pull/14818))
|
||||
- Perf: stabilize long-session scrolling ([#15926](https://github.com/NousResearch/hermes-agent/pull/15926))
|
||||
- Perf: lazily seed virtual history heights ([#16523](https://github.com/NousResearch/hermes-agent/pull/16523))
|
||||
- Perf: cut visible cold start ~57% with lazy agent init ([#17190](https://github.com/NousResearch/hermes-agent/pull/17190))
|
||||
|
||||
---
|
||||
|
||||
## 🖱️ CLI & User Experience
|
||||
|
||||
### New commands
|
||||
- **`hermes -z <prompt>`** — non-interactive one-shot mode ([#15702](https://github.com/NousResearch/hermes-agent/pull/15702))
|
||||
- **`hermes -z` with `--model` / `--provider` / `HERMES_INFERENCE_MODEL`** ([#15704](https://github.com/NousResearch/hermes-agent/pull/15704))
|
||||
- **`hermes update --check`** preflight flag ([#15841](https://github.com/NousResearch/hermes-agent/pull/15841))
|
||||
- **`hermes fallback`** command for managing fallback providers ([#16052](https://github.com/NousResearch/hermes-agent/pull/16052))
|
||||
- **`/busy`** slash command for busy input mode ([#15382](https://github.com/NousResearch/hermes-agent/pull/15382))
|
||||
- **`/busy` gains 'steer' as a third input mode** ([#16279](https://github.com/NousResearch/hermes-agent/pull/16279))
|
||||
- **`/btw` as alias for `/background`** ([#16053](https://github.com/NousResearch/hermes-agent/pull/16053))
|
||||
- **`/reload-skills`** slash command (salvage #17670) ([#17744](https://github.com/NousResearch/hermes-agent/pull/17744))
|
||||
- **Surface `/queue`, `/bg`, `/steer` in agent-running placeholder** ([#16118](https://github.com/NousResearch/hermes-agent/pull/16118))
|
||||
|
||||
### Setup / onboarding
|
||||
- **Auto-reconfigure on existing installs** ([#15879](https://github.com/NousResearch/hermes-agent/pull/15879))
|
||||
- **Contextual first-touch hints for `/busy` and `/verbose`** ([#16046](https://github.com/NousResearch/hermes-agent/pull/16046))
|
||||
- **Cost-saving tips from the April 30 tip-of-the-day** ([#17841](https://github.com/NousResearch/hermes-agent/pull/17841))
|
||||
- **Hyperlink startup banner title to the latest GitHub Release** ([#14945](https://github.com/NousResearch/hermes-agent/pull/14945))
|
||||
|
||||
### Update / backup
|
||||
- **Snapshot pairing data before `git pull`** ([#16383](https://github.com/NousResearch/hermes-agent/pull/16383))
|
||||
- **Auto-backup HERMES_HOME before `hermes update`** (opt-in, off by default) ([#16539](https://github.com/NousResearch/hermes-agent/pull/16539), [#16566](https://github.com/NousResearch/hermes-agent/pull/16566))
|
||||
- **Exclude `checkpoints/` from backups** ([#16572](https://github.com/NousResearch/hermes-agent/pull/16572))
|
||||
- **Exclude SQLite WAL/SHM/journal sidecars from backups** ([#16576](https://github.com/NousResearch/hermes-agent/pull/16576))
|
||||
- **Installer FHS layout for root installs on Linux** ([#15608](https://github.com/NousResearch/hermes-agent/pull/15608))
|
||||
- Fix: kill stale dashboards instead of warning ([#17832](https://github.com/NousResearch/hermes-agent/pull/17832))
|
||||
- Fix: show correct update status on nix-built hermes ([#17550](https://github.com/NousResearch/hermes-agent/pull/17550))
|
||||
|
||||
### Slash-command housekeeping
|
||||
- Refactor: drop `/provider`, `/plan` handler, and clean up slash registry ([#15047](https://github.com/NousResearch/hermes-agent/pull/15047))
|
||||
- Refactor: drop `persist_session` plumbing + fix broken `/btw` mid-turn bypass ([#16075](https://github.com/NousResearch/hermes-agent/pull/16075))
|
||||
|
||||
### OpenClaw migration (for folks coming from OpenClaw)
|
||||
- **Hardened OpenClaw import** — plan-first apply, redaction, pre-migration backup ([#16911](https://github.com/NousResearch/hermes-agent/pull/16911))
|
||||
- Fix: case-preserving brand rewrite + one-time `~/.openclaw` residue banner ([#16327](https://github.com/NousResearch/hermes-agent/pull/16327))
|
||||
- Fix: resolve `openclaw` workspace files from `agents.defaults.workspace` ([#16879](https://github.com/NousResearch/hermes-agent/pull/16879))
|
||||
- Fix: resolve model aliases against real OpenClaw catalog schema (salvage #16778) ([#16977](https://github.com/NousResearch/hermes-agent/pull/16977))
|
||||
|
||||
---
|
||||
|
||||
## 📊 Web Dashboard
|
||||
|
||||
- **Models tab** — rich per-model analytics ([#17745](https://github.com/NousResearch/hermes-agent/pull/17745))
|
||||
- **Configure main + auxiliary models from the Models page** ([#17802](https://github.com/NousResearch/hermes-agent/pull/17802))
|
||||
- **Dashboard Chat tab — xterm.js + JSON-RPC sidecar** (supersedes #12710 + #13379, @OutThisLife) ([#14890](https://github.com/NousResearch/hermes-agent/pull/14890))
|
||||
- **Dashboard layout refresh** (@austinpickett) ([#14899](https://github.com/NousResearch/hermes-agent/pull/14899))
|
||||
- **`--stop` and `--status` flags** on the dashboard CLI ([#17840](https://github.com/NousResearch/hermes-agent/pull/17840))
|
||||
- **Page-scoped plugin slots for built-in pages** ([#15658](https://github.com/NousResearch/hermes-agent/pull/15658))
|
||||
- Fix: replace all buttons with design-system buttons ([#17007](https://github.com/NousResearch/hermes-agent/pull/17007))
|
||||
|
||||
---
|
||||
|
||||
## ⚡ Performance
|
||||
|
||||
- **TUI visible cold start cut ~57%** via lazy agent init ([#17190](https://github.com/NousResearch/hermes-agent/pull/17190))
|
||||
- **Lazy-import OpenAI, Anthropic, Firecrawl, account_usage** ([#17046](https://github.com/NousResearch/hermes-agent/pull/17046))
|
||||
- **mtime-cache `load_config()` and `read_raw_config()`** ([#17041](https://github.com/NousResearch/hermes-agent/pull/17041))
|
||||
- **Memoize `get_tool_definitions()` + TTL-cache `check_fn` results** ([#17098](https://github.com/NousResearch/hermes-agent/pull/17098))
|
||||
- **Precompile DANGEROUS_PATTERNS and HARDLINE_PATTERNS** ([#17206](https://github.com/NousResearch/hermes-agent/pull/17206))
|
||||
- **Cache Ink text measurements across yoga flex re-passes** ([#14818](https://github.com/NousResearch/hermes-agent/pull/14818))
|
||||
- **Stabilize long-session scrolling** ([#15926](https://github.com/NousResearch/hermes-agent/pull/15926))
|
||||
- **Lazily seed virtual history heights** ([#16523](https://github.com/NousResearch/hermes-agent/pull/16523))
|
||||
|
||||
---
|
||||
|
||||
## 🔒 Security & Reliability
- **Secret redaction off by default** — stops corrupting patches / API payloads with fake-key substitutions. Opt in via `redaction.enabled: true` (see the sketch after this list) ([#16794](https://github.com/NousResearch/hermes-agent/pull/16794))
- **`[SYSTEM:` → `[IMPORTANT:`** in all user-injected markers (Azure content filter dodge) ([#16114](https://github.com/NousResearch/hermes-agent/pull/16114))
- **Hardline blocklist for unrecoverable commands** ([#15878](https://github.com/NousResearch/hermes-agent/pull/15878))
- **Canonical `mask_secret` helper; fix status.py DIM drift** ([#17207](https://github.com/NousResearch/hermes-agent/pull/17207))
- **Sweep expired paste.rs uploads on a real timer** ([#16431](https://github.com/NousResearch/hermes-agent/pull/16431))
- **Preserve symlinks during atomic file writes** ([#16980](https://github.com/NousResearch/hermes-agent/pull/16980))
- **Probe `/dev/tty` by opening it, not bare existence** ([#17024](https://github.com/NousResearch/hermes-agent/pull/17024))
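
If you do want masking back, a minimal opt-in sketch (the `redaction.enabled` key is documented above; the surrounding layout is assumed):

```yaml
# Hypothetical config excerpt: redaction is now off by default.
redaction:
  enabled: true   # re-enable secret masking in tool output; leave off if it was corrupting patches for you
```
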
---
|
||||
|
||||
## 🐛 Notable Bug Fixes
|
||||
|
||||
This window includes 360 `fix:` PRs. Selected highlights from across the stack:
|
||||
|
||||
- **Background review fork inherits parent's live runtime** — provider/model/creds now propagate correctly ([#16099](https://github.com/NousResearch/hermes-agent/pull/16099))
|
||||
- **Hindsight configurable `HINDSIGHT_TIMEOUT` env var** ([#15077](https://github.com/NousResearch/hermes-agent/pull/15077))
|
||||
- **Tools: normalize numeric entries + clear stale `no_mcp` in `_save_platform_tools`** ([#15607](https://github.com/NousResearch/hermes-agent/pull/15607))
|
||||
- **MCP: rewrite `definitions` refs to `$defs` in input schemas** — closes provider-side 400s
|
||||
- **Azure content filter compatibility** — renamed `[SYSTEM:` markers so Azure's content filter stops flagging them ([#16114](https://github.com/NousResearch/hermes-agent/pull/16114))
|
||||
- **Vision cache uses HERMES_HOME instead of cwd** ([#17719](https://github.com/NousResearch/hermes-agent/pull/17719))
|
||||
- **FTS5 search** — tool_name + tool_calls indexing with repair + migration ([#16914](https://github.com/NousResearch/hermes-agent/pull/16914))
|
||||
- **Streaming reasoning persists on assistant turns** ([#16892](https://github.com/NousResearch/hermes-agent/pull/16892))
|
||||
- **execute_code concurrent RPC serialization** (#17770) ([#17894](https://github.com/NousResearch/hermes-agent/pull/17894), [#17902](https://github.com/NousResearch/hermes-agent/pull/17902))
|
||||
- **Background reviewer scoped to memory + skills toolsets** — no more accidental web/shell escapes ([#16569](https://github.com/NousResearch/hermes-agent/pull/16569))
|
||||
- **Compression recovery** — retry on main before giving up; notify user when aux fails ([#16774](https://github.com/NousResearch/hermes-agent/pull/16774), [#16775](https://github.com/NousResearch/hermes-agent/pull/16775))
|
||||
- **`croniter` promoted to a core dependency** ([#17577](https://github.com/NousResearch/hermes-agent/pull/17577))
|
||||
- **Discord tool `limit` parameter coerced to int** before `min()` call ([#16319](https://github.com/NousResearch/hermes-agent/pull/16319))
|
||||
- **Yuanbao messaging platform entrance fix** ([#16880](https://github.com/NousResearch/hermes-agent/pull/16880))
|
||||
- **ACP advertise and forward image prompts** ([#18030](https://github.com/NousResearch/hermes-agent/pull/18030))
|
||||
- **DeepSeek / Kimi reasoning content isolation** across cross-provider histories (@Zjianru) ([#15749](https://github.com/NousResearch/hermes-agent/pull/15749), [#15762](https://github.com/NousResearch/hermes-agent/pull/15762))
|
||||
- **Preserve reasoning_content replay on DeepSeek v4 + Kimi/Moonshot thinking** ([#18045](https://github.com/NousResearch/hermes-agent/pull/18045))
|
||||
|
||||
The vast majority of the 360 fixes landed in the streaming/compression/tool-calling paths across all providers — DeepSeek, Kimi, Moonshot, GLM, Qwen, MiniMax, Gemini, Anthropic, OpenAI — alongside TUI polish (resize, scroll, sticky-prompt) and gateway platform-specific edge cases.
|
||||
|
||||
---
|
||||
|
||||
## 🧪 Testing & CI
|
||||
|
||||
- Hermetic test parity (`scripts/run_tests.sh`) held across this window
|
||||
- **Microsoft Teams xdist collision guard** — prevents worker collisions when Teams platform tests run in parallel ([#17828](https://github.com/NousResearch/hermes-agent/pull/17828))
|
||||
- Chore: remove unused imports and dead locals (ruff F401, F841) ([#17010](https://github.com/NousResearch/hermes-agent/pull/17010))
|
||||
|
||||
---
|
||||
|
||||
## 📚 Documentation
|
||||
|
||||
- **Curator feature page** added to docs site ([#17563](https://github.com/NousResearch/hermes-agent/pull/17563))
|
||||
- **Document pin also blocking `skill_manage` writes** ([#17578](https://github.com/NousResearch/hermes-agent/pull/17578))
|
||||
- **Direct-URL skill install documented** across features, reference, guide, and `hermes-agent` skill ([#16355](https://github.com/NousResearch/hermes-agent/pull/16355))
|
||||
- **Hooks tutorial — build a BOOT.md startup checklist** (replaces the removed built-in hook) ([#17202](https://github.com/NousResearch/hermes-agent/pull/17202))
|
||||
- **ComfyUI docs: ask local vs cloud FIRST before hardware check** ([#17612](https://github.com/NousResearch/hermes-agent/pull/17612))
|
||||
- **Obliteratus skill: link YouTube video guide in SKILL.md** ([#15808](https://github.com/NousResearch/hermes-agent/pull/15808))
|
||||
- Per-skill docs pages generated for bundled + optional skills; ASCII art code blocks auto-wrapped ([#14929](https://github.com/NousResearch/hermes-agent/pull/14929), [#16497](https://github.com/NousResearch/hermes-agent/pull/16497))
|
||||
|
||||
---
|
||||
|
||||
## ⚖️ Removed / Reverted
|
||||
|
||||
- **Kanban multi-profile collaboration board** — landed in #16081, reverted in [#16098](https://github.com/NousResearch/hermes-agent/pull/16098) while the design is reworked
|
||||
- **computer-use cua-driver** — 3 preparatory PRs landed, then were reverted in [#16927](https://github.com/NousResearch/hermes-agent/pull/16927)
|
||||
- **BOOT.md built-in hook** removed ([#17093](https://github.com/NousResearch/hermes-agent/pull/17093)); the hooks tutorial ([#17202](https://github.com/NousResearch/hermes-agent/pull/17202)) shows how to build the same workflow yourself with a shell hook
|
||||
- **`/provider` + `/plan` slash commands dropped** ([#15047](https://github.com/NousResearch/hermes-agent/pull/15047))
|
||||
- **`flush_memories` removed entirely** ([#15696](https://github.com/NousResearch/hermes-agent/pull/15696))
|
||||
|
||||
---
|
||||
|
||||
## 👥 Contributors
|
||||
|
||||
### Core
|
||||
- **@teknium1** (Teknium)
|
||||
|
||||
### Top Community Contributors (by merged PR count since v0.11.0)
|
||||
|
||||
- **@OutThisLife** (Brooklyn) — 52 PRs · TUI — light-terminal detection + pluggable busy styles + auto-resume + session-delete from /resume + mouse-wheel scrolling + xterm.js dashboard Chat tab + cold-start cut + accordion polish
|
||||
- **@kshitijk4poor** — 12 PRs · LM Studio first-class provider (salvage), Vercel Sandbox backend, GMI Cloud salvage, bundled-by-default touchdesigner-mcp, many tool-call / reasoning fixes
|
||||
- **@helix4u** — 10 PRs · MCP schema robustness, assorted stability fixes
|
||||
- **@alt-glitch** — 8 PRs · trigram FTS5 CJK search, declarative Nix plugin install, matrix/feishu hints and fixes
|
||||
- **@ethernet8023** — 4 PRs
|
||||
- **@austinpickett** — 4 PRs · LaTeX rendering in TUI, dashboard layout refresh
|
||||
- **@benbarclay** — 3 PRs · Docker run-as-host-user so bind mounts don't get root-owned
|
||||
- **@vominh1919** — 2 PRs
|
||||
- **@stephenschoettler** — 2 PRs
|
||||
- **@kevin-ho** — ConPTY mouse-injection fix (#15488)
|
||||
- **@Zjianru** — cross-provider reasoning_content isolation + DeepSeek/Kimi empty-reasoning injection (#15749, #15762)
|
||||
- **@web3blind** — Telegram chat allowlists for groups and forums (#15027)
|
||||
- **@SHL0MS** — 9 new TouchDesigner-MCP reference docs (#16768)
|
||||
- **@0xDevNinja** — curator `restore_skill` nested-archive fix (#17951)
|
||||
- **@y0shua1ee** — curator `use` activity fix (#17953)
|
||||
|
||||
### Also contributing
|
||||
Salvaged or co-authored work from **@isaachuangGMICLOUD** (GMI Cloud), earlier upstream PRs from the original author of each salvage chain, and a long tail of one-shot fixes, documentation nudges, and skill contributions from the community.
|
||||
|
||||
### All Contributors (alphabetical, excluding @teknium1)
|
||||
|
||||
@0xbyt4, @0xharryriddle, @0xDevNinja, @0z1-ghb, @5park1e, @A-FdL-Prog, @aj-nt, @akhater, @alblez, @alexg0bot,
|
||||
@alexzhu0, @AllardQuek, @alt-glitch, @amanning3390, @amanuel2, @AndreKurait, @andrewhosf, @Andy283, @andyylin,
|
||||
@angel12, @AntAISecurityLab, @ash, @austinpickett, @badgerbees, @BadTechBandit, @Bartok9, @beenherebefore,
|
||||
@beesrsj2500, @BeliefanX, @benbarclay, @benjaminsehl, @BlackishGreen33, @bloodcarter, @BlueBirdBack,
|
||||
@briandevans, @brooklynnicholson, @bsgdigital, @buray, @bwjoke, @camaragon, @cdanis, @cgarwood82,
|
||||
@charles-brooks, @chen1749144759, @chengoak, @ching-kaching, @Contentment003111, @crayfish-ai, @CruxExperts,
|
||||
@cyclingwithelephants, @dandaka, @danklynn, @ddupont808, @dhabibi, @difujia, @dimitrovi, @dlkakbs,
|
||||
@dontcallmejames, @EKKOLearnAI, @emozilla, @ericnicolaides, @Erosika, @ethernet8023, @exiao, @Feranmi10,
|
||||
@flobo3, @foxion37, @georgeglessner, @georgex8001, @ghostmfr, @H-Ali13381, @HangGlidersRule, @harryplusplus,
|
||||
@haru398801, @heathley, @hejuntt1014, @hekaru-agent, @helix4u, @Heltman, @HenkDz, @heyitsaamir, @hharry11,
|
||||
@hhhonzik, @hhuang91, @HiddenPuppy, @htsh, @iamagenius00, @in-liberty420, @innocarpe, @irispillars, @iRonin,
|
||||
@isaachuangGMICLOUD, @Ito-69, @j3ffffff, @jackjin1997, @jakubkrcmar, @Jason2031, @JayGwod, @jerome-benoit,
|
||||
@johnncenae, @Kailigithub, @keiravoss94, @kevin-ho, @knockyai, @konsisumer, @kshitijk4poor, @kunlabs, @l0hde,
|
||||
@Leihb, @leoneparise, @LeonSGP43, @liizfq, @liuhao1024, @loongzhao, @lsdsjy, @luyao618, @ma-pony, @Magaav,
|
||||
@MagicRay1217, @math0r-be, @MattMaximo, @maxims-oss, @MaxyMoos, @maymuneth, @mcndjxlefnd, @memosr,
|
||||
@MestreY0d4-Uninter, @mewwts, @Mirac1eSky, @MorAlekss, @mrhwick, @mrunmayee17, @mssteuer, @Nanako0129,
|
||||
@nazirulhafiy, @Nerijusas, @Nicecsh, @nicoloboschi, @nightq, @ningfangbin, @octo-patch, @Octopus,
|
||||
@OutThisLife, @Paperclip, @pein892, @perlowja, @prasadus92, @qike-ms, @qiyin-code, @Readon, @ReginaldasR,
|
||||
@revaraver, @rfilgueiras, @rmoen, @romanornr, @rugvedS07, @rylena, @samrusani, @Sanjays2402, @sasha-id,
|
||||
@Satoshi-agi, @scheidti, @scotttrinh, @season179, @SeeYangZhi, @sgaofen, @shamork, @shannonsands, @SHL0MS,
|
||||
@simbam99, @Societus, @socrates1024, @Sonoyunchu, @sprmn24, @stephenschoettler, @tangyuanjc, @TechPrototyper,
|
||||
@tekgnosis-net, @ThomassJonax, @tmimmanuel, @tochukwuada, @Tosko4, @Tranquil-Flow, @twozle, @txbxxx,
|
||||
@UgwujaGeorge, @Versun, @vlwkaos, @voidborne-d, @vominh1919, @Wang-tianhao, @Wangshengyang2004, @web3blind,
|
||||
@westers, @Wysie, @xandersbell, @xiahu88988, @XieNBi, @xinbenlv, @xnbi, @y0shua1ee, @yatesjalex, @yes999zc,
|
||||
@yeyitech, @Yoimex, @YueLich, @Yukipukii1, @zhiyanliu, @zicochaos, @Zjianru, @zkl2333, @zons-zhaozhy,
|
||||
@ztexydt-cqh.
|
||||
|
||||
Also: @Siddharth Balyan, @YuShu.
|
||||
|
||||
---
|
||||
|
||||
**Full Changelog**: [v2026.4.23...v2026.4.30](https://github.com/NousResearch/hermes-agent/compare/v2026.4.23...v2026.4.30)
|
||||
@@ -1,641 +0,0 @@
|
||||
# Hermes Agent v0.13.0 (v2026.5.7)
|
||||
|
||||
**Release Date:** May 7, 2026
|
||||
**Since v0.12.0:** 864 commits · 588 merged PRs · 829 files changed · 128,366 insertions · 282 issues closed (13 P0, 36 P1) · 295 community contributors (including co-authors)
|
||||
|
||||
> The Tenacity Release — Hermes Agent now finishes what it starts. Kanban ships as a durable multi-agent board (heartbeat, reclaim, zombie detection, auto-block on incomplete exit, per-task retries, hallucination recovery). `/goal` keeps the agent locked on a target across turns (Ralph loop). Checkpoints v2 rewrites state persistence with real pruning. Gateway auto-resumes interrupted sessions after restart. Cron grows a `no_agent` watchdog mode. A security wave closes 8 P0s — redaction is now ON by default, Discord role-allowlists are guild-scoped, WhatsApp rejects strangers by default, and TOCTOU windows close across auth.json and MCP OAuth. Google Chat becomes the 20th platform. Providers become a pluggable surface. Seven i18n locales ship.
|
||||
|
||||
---
|
||||
|
||||
## ✨ Highlights
|
||||
|
||||
- **Multi-agent Kanban — delegate to an AI team that actually finishes** — Spin up a durable board, drop tasks on it, and let multiple Hermes workers pick them up, hand off, and close them out. Heartbeats, reclaim, zombie detection, retry budgets, and a hallucination gate keep the team honest. One install, many kanbans. ([#17805](https://github.com/NousResearch/hermes-agent/pull/17805), [#19653](https://github.com/NousResearch/hermes-agent/pull/19653), [#20232](https://github.com/NousResearch/hermes-agent/pull/20232), [#20332](https://github.com/NousResearch/hermes-agent/pull/20332), [#21330](https://github.com/NousResearch/hermes-agent/pull/21330), [#21183](https://github.com/NousResearch/hermes-agent/pull/21183), [#21214](https://github.com/NousResearch/hermes-agent/pull/21214))
|
||||
|
||||
- **`/goal` — the agent doesn't forget what you asked it to do** — Lock the agent onto a target and it stays on task across turns. The Ralph loop as a first-class primitive. ([#18262](https://github.com/NousResearch/hermes-agent/pull/18262), [#18275](https://github.com/NousResearch/hermes-agent/pull/18275), [#21287](https://github.com/NousResearch/hermes-agent/pull/21287))
|
||||
|
||||
- **Show it a video** — new `video_analyze` tool for native video understanding on Gemini and compatible multimodal models. (@alt-glitch) ([#19301](https://github.com/NousResearch/hermes-agent/pull/19301))
|
||||
|
||||
- **Clone a voice** — xAI Custom Voices lands as a TTS provider with voice cloning support. (@alt-glitch) ([#18776](https://github.com/NousResearch/hermes-agent/pull/18776))
|
||||
|
||||
- **Hermes speaks your language** — static gateway + CLI messages translate to 7 locales: Chinese, Japanese, German, Spanish, French, Ukrainian, and Turkish. Docs site gains a Chinese (zh-Hans) locale. ([#20231](https://github.com/NousResearch/hermes-agent/pull/20231), [#20329](https://github.com/NousResearch/hermes-agent/pull/20329), [#20467](https://github.com/NousResearch/hermes-agent/pull/20467), [#20474](https://github.com/NousResearch/hermes-agent/pull/20474), [#20430](https://github.com/NousResearch/hermes-agent/pull/20430), [#20431](https://github.com/NousResearch/hermes-agent/pull/20431))
|
||||
|
||||
- **Google Chat — the 20th messaging platform** — plus a generic platform-plugin hooks surface so third-party adapters drop in without touching core (IRC and Teams migrated). ([#21306](https://github.com/NousResearch/hermes-agent/pull/21306), [#21331](https://github.com/NousResearch/hermes-agent/pull/21331))
|
||||
|
||||
- **Sessions survive restarts** — gateway bounces mid-agent, `/update` restarts, source-file reloads — conversations auto-resume when the gateway comes back. ([#21192](https://github.com/NousResearch/hermes-agent/pull/21192))
|
||||
|
||||
- **Security wave — 8 P0 closures** — redaction ON by default, Discord role-allowlists guild-scoped (CVSS 8.1 cross-guild DM bypass closed), WhatsApp rejects strangers by default, TOCTOU windows closed across `auth.json` and MCP OAuth, browser enforces cloud-metadata SSRF floor, cron prompt-injection scans assembled skill content, `hermes debug share` redacts at upload. ([#21193](https://github.com/NousResearch/hermes-agent/pull/21193), [#21241](https://github.com/NousResearch/hermes-agent/pull/21241), [#21291](https://github.com/NousResearch/hermes-agent/pull/21291), [#21176](https://github.com/NousResearch/hermes-agent/pull/21176), [#21194](https://github.com/NousResearch/hermes-agent/pull/21194), [#21228](https://github.com/NousResearch/hermes-agent/pull/21228), [#21350](https://github.com/NousResearch/hermes-agent/pull/21350), [#19318](https://github.com/NousResearch/hermes-agent/pull/19318))
|
||||
|
||||
- **Checkpoints v2** — state persistence rewritten. Real pruning, disk guardrails, no more orphan shadow repos. ([#20709](https://github.com/NousResearch/hermes-agent/pull/20709))
|
||||
|
||||
- **The agent lints its own writes** — post-write delta lint on `write_file` + `patch`. Python, JSON, YAML, TOML. Syntax errors surface immediately instead of shipping downstream. ([#20191](https://github.com/NousResearch/hermes-agent/pull/20191))
|
||||
|
||||
- **`no_agent` cron mode — script-only watchdog** — cron jobs can now skip the agent entirely and just run a script. Empty stdout is silent, non-empty gets delivered verbatim. ([#19709](https://github.com/NousResearch/hermes-agent/pull/19709))
|
||||
|
||||
- **Platform allowlists everywhere** — `allowed_channels` / `allowed_chats` / `allowed_rooms` config across Slack, Telegram, Mattermost, Matrix, and DingTalk (see the sketch after this list). ([#21251](https://github.com/NousResearch/hermes-agent/pull/21251))
|
||||
|
||||
- **Providers are now plugins** — `ProviderProfile` ABC + `plugins/model-providers/`. Drop in third-party providers without touching core. ([#20324](https://github.com/NousResearch/hermes-agent/pull/20324))
|
||||
|
||||
- **API server — long-term memory per session** — `X-Hermes-Session-Key` header gives memory providers a stable session identifier. ([#20199](https://github.com/NousResearch/hermes-agent/pull/20199))
|
||||
|
||||
- **MCP levels up** — SSE transport with OAuth forwarding, stale-pipe retries, image results surface as MEDIA tags instead of getting dropped, keepalive on long-lived lifecycle waits. ([#21227](https://github.com/NousResearch/hermes-agent/pull/21227), [#21323](https://github.com/NousResearch/hermes-agent/pull/21323), [#21289](https://github.com/NousResearch/hermes-agent/pull/21289), [#21328](https://github.com/NousResearch/hermes-agent/pull/21328), [#20209](https://github.com/NousResearch/hermes-agent/pull/20209))
|
||||
|
||||
- **Curator grows subcommands** — `hermes curator archive`, `prune`, `list-archived`. Manual `hermes curator run` is synchronous now — you see results without polling. ([#20200](https://github.com/NousResearch/hermes-agent/pull/20200), [#21236](https://github.com/NousResearch/hermes-agent/pull/21236), [#21216](https://github.com/NousResearch/hermes-agent/pull/21216))
|
||||
|
||||
- **ACP — `/steer` and `/queue`** — direct the in-flight agent or queue follow-ups from Zed, VS Code, or JetBrains. Plus atomic session persistence and reasoning-metadata preservation across restarts. (@HenkDz) ([#18114](https://github.com/NousResearch/hermes-agent/pull/18114), [#20279](https://github.com/NousResearch/hermes-agent/pull/20279), [#20296](https://github.com/NousResearch/hermes-agent/pull/20296), [#20433](https://github.com/NousResearch/hermes-agent/pull/20433))
|
||||
|
||||
- **TUI glow-up** — `/model` picker matches `hermes model` with inline auth (@austinpickett), collapsible startup banner sections (@kshitijk4poor), context-compression counter in the status bar. ([#18117](https://github.com/NousResearch/hermes-agent/pull/18117), [#20625](https://github.com/NousResearch/hermes-agent/pull/20625), [#21218](https://github.com/NousResearch/hermes-agent/pull/21218))
|
||||
|
||||
- **Dashboard grows up** — Plugins page (manage, enable/disable, auth status) (@austinpickett), Profiles management page (@vincez-hms-coder), sortable analytics tables, reverse-proxy support via `X-Forwarded-Prefix`, new `default-large` 18px theme. ([#18095](https://github.com/NousResearch/hermes-agent/pull/18095), [#16419](https://github.com/NousResearch/hermes-agent/pull/16419), [#18192](https://github.com/NousResearch/hermes-agent/pull/18192), [#21296](https://github.com/NousResearch/hermes-agent/pull/21296), [#20820](https://github.com/NousResearch/hermes-agent/pull/20820))
|
||||
|
||||
- **SearXNG + split web tools** — SearXNG ships as a native search-only backend; web tools now let you pick different backends per capability (search vs extract vs browse). (@kshitijk4poor) ([#20823](https://github.com/NousResearch/hermes-agent/pull/20823), [#20061](https://github.com/NousResearch/hermes-agent/pull/20061), [#20841](https://github.com/NousResearch/hermes-agent/pull/20841))
|
||||
|
||||
- **OpenRouter response caching** — explicit cache control for models that expose it. (@kshitijk4poor) ([#19132](https://github.com/NousResearch/hermes-agent/pull/19132))
|
||||
|
||||
- **`[[as_document]]` — skill media-routing directive** — skills can force the gateway to deliver output as a document on platforms that support it. ([#21210](https://github.com/NousResearch/hermes-agent/pull/21210))
|
||||
|
||||
- **`transform_llm_output` plugin hook** — new lifecycle hook that lets plugins reshape or filter LLM output before it hits the conversation. Useful for context-window reducers and content filters. ([#21235](https://github.com/NousResearch/hermes-agent/pull/21235))
|
||||
|
||||
- **Nous OAuth persists across profiles** — shared token store: sign in once, every profile inherits the session. ([#19712](https://github.com/NousResearch/hermes-agent/pull/19712))
|
||||
|
||||
- **QQBot — native approval keyboards** — feature parity with Telegram / Discord approval UX. Chunked upload, quoted attachments. ([#21342](https://github.com/NousResearch/hermes-agent/pull/21342), [#21353](https://github.com/NousResearch/hermes-agent/pull/21353))
|
||||
|
||||
- **6 new optional skills** — Shopify (Admin + Storefront GraphQL), here.now, shop-app personal shopping assistant, Anthropic financial-services bundle, kanban-video-orchestrator (@SHL0MS), searxng-search (@kshitijk4poor). ([#18116](https://github.com/NousResearch/hermes-agent/pull/18116), [#18170](https://github.com/NousResearch/hermes-agent/pull/18170), [#20702](https://github.com/NousResearch/hermes-agent/pull/20702), [#21180](https://github.com/NousResearch/hermes-agent/pull/21180), [#19281](https://github.com/NousResearch/hermes-agent/pull/19281), [#20841](https://github.com/NousResearch/hermes-agent/pull/20841))
|
||||
|
||||
- **New models** — `deepseek/deepseek-v4-pro`, `x-ai/grok-4.3`, `openrouter/owl-alpha` (free), `tencent/hy3-preview` (@Contentment003111), Arcee Trinity Large Thinking temperature + compression overrides. ([#20495](https://github.com/NousResearch/hermes-agent/pull/20495), [#20497](https://github.com/NousResearch/hermes-agent/pull/20497), [#18071](https://github.com/NousResearch/hermes-agent/pull/18071), [#21077](https://github.com/NousResearch/hermes-agent/pull/21077), [#20473](https://github.com/NousResearch/hermes-agent/pull/20473))
|
||||
|
||||
- **100 fresh CLI startup tips** — the random tip banner gets 100 new entries covering cron, kanban, curator, plugins, and lesser-known flags. ([#20168](https://github.com/NousResearch/hermes-agent/pull/20168))
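
A sketch of what the per-platform allowlists might look like: the three key names come from the release note, while the per-platform nesting and the example IDs are placeholders.

```yaml
# Hypothetical allowlist excerpts: nesting and IDs are illustrative.
slack:
  allowed_channels: ["C0123ABCD", "#ops"]
telegram:
  allowed_chats: [-1001234567890]
matrix:
  allowed_rooms: ["!abcdef:example.org"]
```
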
---
|
||||
|
||||
## 🧩 Multi-Agent Kanban (Durable)
|
||||
|
||||
### New — durable multi-profile collaboration board
|
||||
- **`feat(kanban): durable multi-profile collaboration board`** — post-revert reimplementation, multi-profile by design ([#17805](https://github.com/NousResearch/hermes-agent/pull/17805))
|
||||
- **Multi-project boards** — one install, many kanbans ([#19653](https://github.com/NousResearch/hermes-agent/pull/19653), [#19679](https://github.com/NousResearch/hermes-agent/pull/19679))
|
||||
- **Share board, workspaces, and worker logs across profiles** ([#19378](https://github.com/NousResearch/hermes-agent/pull/19378))
|
||||
- **Hallucination gate + recovery UX for worker-created-card claims** (closes #20017) ([#20232](https://github.com/NousResearch/hermes-agent/pull/20232))
|
||||
- **Generic diagnostics engine for task distress signals** ([#20332](https://github.com/NousResearch/hermes-agent/pull/20332))
|
||||
- **Per-task `max_retries` override** (supersedes #20972) ([#21330](https://github.com/NousResearch/hermes-agent/pull/21330))
|
||||
- **Multiline textarea for inline-create title** (salvage of #20970) ([#21243](https://github.com/NousResearch/hermes-agent/pull/21243))
|
||||
|
||||
### Kanban Dashboard
|
||||
- **Workspace kind + path inputs in inline create form** ([#19679](https://github.com/NousResearch/hermes-agent/pull/19679))
|
||||
- **Per-platform home-channel notification toggles** ([#19864](https://github.com/NousResearch/hermes-agent/pull/19864))
|
||||
- **Sharper home-channel toggle contrast + drop → running action** ([#19916](https://github.com/NousResearch/hermes-agent/pull/19916))
|
||||
- Fix: reject direct status transition to 'running' via dashboard API (salvage of #19554) ([#19705](https://github.com/NousResearch/hermes-agent/pull/19705))
|
||||
- Fix: dashboard board pin authoritative over server current file (#20879) ([#21230](https://github.com/NousResearch/hermes-agent/pull/21230))
|
||||
- Fix: treat dashboard event-stream cancellation as normal shutdown (#20790) ([#21222](https://github.com/NousResearch/hermes-agent/pull/21222))
|
||||
- Fix: filter dashboard board by selected tenant (#19817) ([#21349](https://github.com/NousResearch/hermes-agent/pull/21349))
|
||||
- Fix: code/pre styling theme-immune across all themes (#21086) ([#21247](https://github.com/NousResearch/hermes-agent/pull/21247))
|
||||
- Fix: reset `<code>` background inside dashboard board ([#20687](https://github.com/NousResearch/hermes-agent/pull/20687))
|
||||
- Fix: preserve dashboard completion summaries + add kanban edit (salvages #20016) ([#20195](https://github.com/NousResearch/hermes-agent/pull/20195))
|
||||
- Fix: avoid fragile failure-column renames (salvage #20848) (@kshitijk4poor) ([#20855](https://github.com/NousResearch/hermes-agent/pull/20855))
|
||||
|
||||
### Worker lifecycle + reliability
|
||||
- **Heartbeat + reclaim + zombie + retry-cap fixes** (#21147, #21141, #21169, #20881) ([#21183](https://github.com/NousResearch/hermes-agent/pull/21183))
|
||||
- **Auto-block workers that exit without completing + shutdown race** (#20894) ([#21214](https://github.com/NousResearch/hermes-agent/pull/21214))
|
||||
- **Detect darwin zombie workers** (salvages #20023) ([#20188](https://github.com/NousResearch/hermes-agent/pull/20188))
|
||||
- **Unify failure counter across spawn/timeout/crash outcomes** ([#20410](https://github.com/NousResearch/hermes-agent/pull/20410))
|
||||
- **Enforce worker task-ownership on destructive tool calls** ([#19713](https://github.com/NousResearch/hermes-agent/pull/19713))
|
||||
- **Drop worker identity claim from KANBAN_GUIDANCE** ([#19427](https://github.com/NousResearch/hermes-agent/pull/19427))
|
||||
- Fix: skip dispatch for tasks assigned to non-profile lanes (salvages #20105, #20134) ([#20165](https://github.com/NousResearch/hermes-agent/pull/20165))
|
||||
- Fix: include default profile in on-disk assignee enumeration (salvages #20123) ([#20170](https://github.com/NousResearch/hermes-agent/pull/20170))
|
||||
- Fix: ignore stale current board pointers (salvages #20063) ([#20183](https://github.com/NousResearch/hermes-agent/pull/20183))
|
||||
- Fix: profile discovery ignores HERMES_HOME in custom-root deployments (@jackey8616) ([#19020](https://github.com/NousResearch/hermes-agent/pull/19020))
|
||||
- Fix: allow orchestrator profiles to see kanban tools via toolsets config ([#19606](https://github.com/NousResearch/hermes-agent/pull/19606))
|
||||
|
||||
### Batch salvages
|
||||
- Tier-1 batch — metadata test, max_spawn config, run-id lifecycle guard (salvages #19522 #19556 #19829) ([#20440](https://github.com/NousResearch/hermes-agent/pull/20440))
|
||||
- Tier-2 batch — doctor, started_at, parent-guard, latest_summary, selects, linked-children ([#20448](https://github.com/NousResearch/hermes-agent/pull/20448))
|
||||
|
||||
### Documentation
|
||||
- Backfill multi-board refs in reference docs ([#19704](https://github.com/NousResearch/hermes-agent/pull/19704))
|
||||
- Document `/kanban` slash command ([#19584](https://github.com/NousResearch/hermes-agent/pull/19584))
|
||||
- Document recommended handoff evidence metadata (salvage #19512) ([#20415](https://github.com/NousResearch/hermes-agent/pull/20415))
|
||||
- Fix orchestrator + worker skill setup instructions (@helix4u) ([#20958](https://github.com/NousResearch/hermes-agent/pull/20958), [#20960](https://github.com/NousResearch/hermes-agent/pull/20960))
|
||||
|
||||
---
|
||||
|
||||
## 🎯 Persistent Goals, Checkpoints & Session Durability
|
||||
|
||||
### `/goal` — persistent cross-turn goals (Ralph loop)
|
||||
- **`feat: /goal — persistent cross-turn goals`** ([#18262](https://github.com/NousResearch/hermes-agent/pull/18262))
|
||||
- **Docs page — Persistent Goals (/goal)** ([#18275](https://github.com/NousResearch/hermes-agent/pull/18275))
|
||||
- Fix: honor configured goal turn budget (salvage #19423) ([#21287](https://github.com/NousResearch/hermes-agent/pull/21287))
|
||||
|
||||
### Checkpoints v2
- **Single-store rewrite with real pruning + disk guardrails** ([#20709](https://github.com/NousResearch/hermes-agent/pull/20709))
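
The details of the single-store layout are in the PR; the pruning idea itself is easy to sketch: cap checkpoints by count and by total size so a long-running session cannot fill the disk. Everything named below (directory, caps, file extension) is an assumption for illustration, not the shipped implementation.

```python
from pathlib import Path

# All names and limits here are illustrative assumptions, not the real store's.
CHECKPOINT_DIR = Path.home() / ".hermes" / "checkpoints"
MAX_CHECKPOINTS = 50                   # keep at most this many checkpoint files
MAX_TOTAL_BYTES = 256 * 1024 * 1024    # and at most this much disk in total

def prune_checkpoints(directory: Path = CHECKPOINT_DIR) -> None:
    """Delete the oldest checkpoints until both the count and size caps hold."""
    files = sorted(directory.glob("*.ckpt"), key=lambda p: p.stat().st_mtime, reverse=True)
    running_total = 0
    for index, path in enumerate(files):
        running_total += path.stat().st_size
        if index >= MAX_CHECKPOINTS or running_total > MAX_TOTAL_BYTES:
            path.unlink(missing_ok=True)  # newest files are kept, the oldest fall off
```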
### Session durability
|
||||
- **Auto-resume interrupted sessions after gateway restart** (salvage #20888) ([#21192](https://github.com/NousResearch/hermes-agent/pull/21192))
|
||||
- **Preserve pending update prompts across restarts** ([#20160](https://github.com/NousResearch/hermes-agent/pull/20160))
|
||||
- **Preserve home-channel thread targets across restart notifications** (salvage #18440) ([#19271](https://github.com/NousResearch/hermes-agent/pull/19271))
|
||||
- **Preserve thread routing from cached live session sources** ([#21206](https://github.com/NousResearch/hermes-agent/pull/21206))
|
||||
- **Preserve assistant metadata when branching sessions** ([#18222](https://github.com/NousResearch/hermes-agent/pull/18222))
|
||||
- **Preserve thread routing for /update progress and prompts** ([#18193](https://github.com/NousResearch/hermes-agent/pull/18193))
|
||||
- **Preserve document type when merging queued events** ([#18215](https://github.com/NousResearch/hermes-agent/pull/18215))
|
||||
|
||||
---
|
||||
|
||||
## 🛡️ Security & Reliability
|
||||
|
||||
### Security hardening (8 P0 closures)
- **Enable secret redaction by default** (#17691, #20785) ([#21193](https://github.com/NousResearch/hermes-agent/pull/21193)); a minimal redaction sketch follows this list
- **Discord — scope `DISCORD_ALLOWED_ROLES` to originating guild** (#12136, CVSS 8.1) ([#21241](https://github.com/NousResearch/hermes-agent/pull/21241))
- **WhatsApp — reject strangers by default, never respond in self-chat** (#8389) ([#21291](https://github.com/NousResearch/hermes-agent/pull/21291))
- **MCP OAuth — close TOCTOU window when saving credentials** ([#21176](https://github.com/NousResearch/hermes-agent/pull/21176))
- **`hermes_cli/auth.py` — close TOCTOU window in credential writers** ([#21194](https://github.com/NousResearch/hermes-agent/pull/21194))
- **Browser — enforce cloud-metadata SSRF floor in hybrid routing** (#16234) ([#21228](https://github.com/NousResearch/hermes-agent/pull/21228))
- **`hermes debug share` — redact log content at upload time** (@GodsBoy) ([#19318](https://github.com/NousResearch/hermes-agent/pull/19318))
- **Cron — scan assembled prompt including skill content for prompt injection** (#3968) ([#21350](https://github.com/NousResearch/hermes-agent/pull/21350))
- **Restore .env/auth.json/state.db with 0600 perms** ([#19699](https://github.com/NousResearch/hermes-agent/pull/19699))
- **SRI integrity for dashboard plugin scripts** (salvage #19389) ([#21277](https://github.com/NousResearch/hermes-agent/pull/21277))
- **Bind Meet node server to localhost, restrict token file to owner read** ([#19597](https://github.com/NousResearch/hermes-agent/pull/19597))
- **Extend sensitive-write target to cover shell RC and credential files** ([#19282](https://github.com/NousResearch/hermes-agent/pull/19282))
- **Harden YOLO mode env parsing against quoted-bool strings** ([#18214](https://github.com/NousResearch/hermes-agent/pull/18214))
- **OSV-Scanner CI + Dependabot for github-actions only** ([#20037](https://github.com/NousResearch/hermes-agent/pull/20037))
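
For context on what redaction-by-default means in practice: match common credential shapes and replace them before text leaves the process (logs, `hermes debug share` uploads, sandbox output). The patterns and function below are a minimal sketch of that approach, not the project's actual redaction code.

```python
import re

# Illustrative patterns only; a real redactor covers many more credential formats.
SECRET_PATTERNS = [
    re.compile(r"sk-[A-Za-z0-9]{20,}"),                          # OpenAI-style API keys
    re.compile(r"ghp_[A-Za-z0-9]{36}"),                          # GitHub personal access tokens
    re.compile(r"AKIA[0-9A-Z]{16}"),                             # AWS access key IDs
    re.compile(r"(?i)(api[_-]?key|token|secret)\s*[=:]\s*\S+"),  # generic KEY=value pairs
]

def redact(text: str, placeholder: str = "[REDACTED]") -> str:
    """Replace anything that looks like a credential before it is logged or shared."""
    for pattern in SECRET_PATTERNS:
        text = pattern.sub(placeholder, text)
    return text

print(redact("Authorization: Bearer sk-abc123def456ghi789jkl012mno345"))
```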
### Reliability — critical bug closures
|
||||
- **CLI crash on startup — `Invalid key 'c-S-c'`** (P0, prompt_toolkit doesn't support Shift modifier) ([#19895](https://github.com/NousResearch/hermes-agent/pull/19895), [#19919](https://github.com/NousResearch/hermes-agent/pull/19919))
|
||||
- **CLOSE_WAIT fd leak audit** — httpx keepalive + WhatsApp aiohttp leak + Feishu hygiene (#18451) ([#18766](https://github.com/NousResearch/hermes-agent/pull/18766))
- **Gateway no longer creates an AIAgent with an empty OpenRouter API key when OPENROUTER_API_KEY is missing** (#20982) — fallback providers are now correctly honored
- **Background review + curator protected from overwriting bundled/hub skills** (#20273) ([#20194](https://github.com/NousResearch/hermes-agent/pull/20194))
|
||||
- **TUI compression continuation — ghost sessions with incomplete metadata** (#20001)
|
||||
- **`hermes mcp add` silently launches chat instead of registering MCP server** (#19785) ([#21204](https://github.com/NousResearch/hermes-agent/pull/21204))
|
||||
- **Background review agent runtime propagation** — provider/model/credentials now actually inherit from parent
|
||||
- **Inbound document host paths translated to container paths for Docker backend** (salvage #19048) ([#21184](https://github.com/NousResearch/hermes-agent/pull/21184))
|
||||
- **Matrix gateway race between auto-redaction and message delivery with high-speed models** (#19075)
|
||||
- **`/new` during active agent session never sends response on Telegram** (#18912)
|
||||
|
||||
---
|
||||
|
||||
## 📱 Messaging Platforms (Gateway)
|
||||
|
||||
### New platform
|
||||
- **Google Chat — 20th platform** + generic `env_enablement_fn` / `cron_deliver_env_var` platform-plugin hooks (IRC + Teams migrated) ([#21306](https://github.com/NousResearch/hermes-agent/pull/21306), [#21331](https://github.com/NousResearch/hermes-agent/pull/21331))
|
||||
|
||||
### Cross-platform
|
||||
- **`allowed_{channels,chats,rooms}` whitelist** — Slack (salvage #7401), Telegram, Mattermost, Matrix, DingTalk ([#21251](https://github.com/NousResearch/hermes-agent/pull/21251))
|
||||
- **Per-platform `gateway_restart_notification` flag** ([#20892](https://github.com/NousResearch/hermes-agent/pull/20892))
|
||||
- **`busy_ack_enabled` config — suppress ack messages** ([#18194](https://github.com/NousResearch/hermes-agent/pull/18194))
|
||||
- **Auto-delete slash-command system notices after TTL** ([#18266](https://github.com/NousResearch/hermes-agent/pull/18266))
|
||||
- **Opt-in cleanup of temporary progress bubbles** ([#21186](https://github.com/NousResearch/hermes-agent/pull/21186))
|
||||
- **`[[as_document]]` directive — skill media routing** (salvage #19069) ([#21210](https://github.com/NousResearch/hermes-agent/pull/21210))
|
||||
- **`hermes gateway list` — cross-profile status** (salvage #19129) ([#21225](https://github.com/NousResearch/hermes-agent/pull/21225))
|
||||
- **Auto-resume interrupted sessions after restart** (salvage #20888) ([#21192](https://github.com/NousResearch/hermes-agent/pull/21192))
|
||||
- **Atomic restart markers + Windows runtime-lock offset** (#17842) ([#18179](https://github.com/NousResearch/hermes-agent/pull/18179))
|
||||
- Fix: `config.yaml` wins over `.env` for agent/display/timezone settings ([#18764](https://github.com/NousResearch/hermes-agent/pull/18764))
|
||||
- Fix: auto-restart when source files change out from under us (#17648) ([#18409](https://github.com/NousResearch/hermes-agent/pull/18409))
|
||||
- Fix: use git HEAD SHA for stale-code check, not file mtimes ([#19740](https://github.com/NousResearch/hermes-agent/pull/19740))
|
||||
- Fix: shutdown + restart hygiene — drain timeout, false-fatal, success log ([#18761](https://github.com/NousResearch/hermes-agent/pull/18761))
|
||||
- Fix: preserve max_turns after env reload (salvage #19183) ([#21240](https://github.com/NousResearch/hermes-agent/pull/21240))
|
||||
- Fix: exclude ancestor PIDs from gateway process scan ([#19586](https://github.com/NousResearch/hermes-agent/pull/19586))
|
||||
- Fix: move quick-command alias dispatch before built-ins ([#19588](https://github.com/NousResearch/hermes-agent/pull/19588))
|
||||
- Fix: show other profiles in 'gateway status' to prevent confusion ([#19582](https://github.com/NousResearch/hermes-agent/pull/19582))
|
||||
- Fix: include external_dirs skills in Telegram/Discord slash commands (salvage #8790) ([#18741](https://github.com/NousResearch/hermes-agent/pull/18741))
|
||||
- Fix: match disabled/optional skills by frontmatter slug, not dir name ([#18753](https://github.com/NousResearch/hermes-agent/pull/18753))
|
||||
- Fix: read /status token totals from SessionDB (#17158) ([#18206](https://github.com/NousResearch/hermes-agent/pull/18206))
|
||||
- Fix: snapshot callback generation after agent binds it, not before ([#18219](https://github.com/NousResearch/hermes-agent/pull/18219))
|
||||
- Fix: re-inject topic-bound skill after /new or /reset ([#18205](https://github.com/NousResearch/hermes-agent/pull/18205))
|
||||
- Fix: isolate pending native image paths by session ([#18202](https://github.com/NousResearch/hermes-agent/pull/18202))
|
||||
- Fix: clear queued reload skills notes on new/resume/branch ([#19431](https://github.com/NousResearch/hermes-agent/pull/19431))
|
||||
- Fix: hide required-arg commands from Telegram menu ([#19400](https://github.com/NousResearch/hermes-agent/pull/19400))
|
||||
- Fix: bridge top-level `require_mention` to Telegram config ([#19429](https://github.com/NousResearch/hermes-agent/pull/19429))
|
||||
- Fix: suppress duplicate voice transcripts ([#19428](https://github.com/NousResearch/hermes-agent/pull/19428))
|
||||
- Fix: show friendly error when service is not installed ([#19707](https://github.com/NousResearch/hermes-agent/pull/19707))
|
||||
- Fix: read context_length from custom_providers in session info header ([#19708](https://github.com/NousResearch/hermes-agent/pull/19708))
|
||||
- Fix: preserve WSL interop PATH in systemd units ([#19867](https://github.com/NousResearch/hermes-agent/pull/19867))
|
||||
- Fix: handle planned service stops (salvage #19876) ([#19936](https://github.com/NousResearch/hermes-agent/pull/19936))
|
||||
- Fix: keep DoH-confirmed Telegram IPs that match system DNS (salvage #17043) ([#20175](https://github.com/NousResearch/hermes-agent/pull/20175))
|
||||
- Fix: load `reply_to_mode` from config.yaml for Discord + Telegram (salvage #17117) ([#20171](https://github.com/NousResearch/hermes-agent/pull/20171))
|
||||
- Fix: tolerate malformed HERMES_HUMAN_DELAY_* env vars (salvage #16933) ([#20217](https://github.com/NousResearch/hermes-agent/pull/20217))
|
||||
- Fix: deterministic thread eviction preserves newest entries (salvage #13639) ([#20285](https://github.com/NousResearch/hermes-agent/pull/20285))
|
||||
- Fix: don't dead-end setup wizard when only system-scope unit is installed ([#20905](https://github.com/NousResearch/hermes-agent/pull/20905))
|
||||
- Fix: wait for systemd restart readiness + harden Discord slash-command sync ([#20949](https://github.com/NousResearch/hermes-agent/pull/20949))
|
||||
- Fix: avoid duplicated Responses history (salvage #18995) ([#21185](https://github.com/NousResearch/hermes-agent/pull/21185))
|
||||
- Fix: surface bootstrap failures to stderr (salvage #21157) ([#21278](https://github.com/NousResearch/hermes-agent/pull/21278))
|
||||
- Fix: log agent task failures instead of silently losing usage data (salvage #21159) ([#21274](https://github.com/NousResearch/hermes-agent/pull/21274))
|
||||
- Fix: log runtime-status write failures with rate-limiting (salvage #21158) ([#21285](https://github.com/NousResearch/hermes-agent/pull/21285))
|
||||
- Fix: reset-failed before every fallback restart so the gateway can't get stranded ([#21371](https://github.com/NousResearch/hermes-agent/pull/21371))
|
||||
- Fix: Telegram — preserve `thread_id=1` for forum General typing indicator ([#21390](https://github.com/NousResearch/hermes-agent/pull/21390))
|
||||
- Fix: batch critical fixes — session resume, /new race, HA WebSocket scheme (@kshitijk4poor) ([#19182](https://github.com/NousResearch/hermes-agent/pull/19182))
|
||||
|
||||
### Telegram
|
||||
- **DM user-managed multi-session topics** (salvage of #19185) ([#19206](https://github.com/NousResearch/hermes-agent/pull/19206))
|
||||
|
||||
### Discord
|
||||
- **Message deletion action** (salvage #19052) ([#21197](https://github.com/NousResearch/hermes-agent/pull/21197))
|
||||
- Fix: allow `free_response_channels` to override `DISCORD_IGNORE_NO_MENTION` ([#19629](https://github.com/NousResearch/hermes-agent/pull/19629))
|
||||
|
||||
### Slack
|
||||
- Fix: ephemeral slash-command ack, private notice delivery, format_message fixes (@kshitijk4poor) ([#18198](https://github.com/NousResearch/hermes-agent/pull/18198))
|
||||
|
||||
### WhatsApp
|
||||
- Fix: load WhatsApp home channel from env overrides ([#18190](https://github.com/NousResearch/hermes-agent/pull/18190))
|
||||
|
||||
### Feishu
|
||||
- **Operator-configurable bot admission and mention policy** ([#18208](https://github.com/NousResearch/hermes-agent/pull/18208))
|
||||
- Fix: force text mode for markdown tables (salvage of #13723 by @WuTianyi123) ([#20275](https://github.com/NousResearch/hermes-agent/pull/20275))
|
||||
|
||||
### Matrix + Email
|
||||
- Fix: `/sethome` on Matrix and Email now persists across restarts ([#18272](https://github.com/NousResearch/hermes-agent/pull/18272))
|
||||
|
||||
### Teams
|
||||
- **Docs + feat: sidebar + threading with group-chat fallback** ([#20042](https://github.com/NousResearch/hermes-agent/pull/20042))
|
||||
|
||||
### Weixin
|
||||
- Fix: deduplicate Weixin messages by content fingerprint ([#19742](https://github.com/NousResearch/hermes-agent/pull/19742))
|
||||
|
||||
### QQBot
|
||||
- **Port SDK improvements in-tree — chunked upload, approval keyboards, quoted attachments** ([#21342](https://github.com/NousResearch/hermes-agent/pull/21342))
|
||||
- **Wire native tool-approval UX via inline keyboards** ([#21353](https://github.com/NousResearch/hermes-agent/pull/21353))
|
||||
|
||||
---
|
||||
|
||||
## 🏗️ Core Agent & Architecture
|
||||
|
||||
### Provider & Model Support
|
||||
|
||||
#### Pluggable providers
- **ProviderProfile ABC + `plugins/model-providers/`** — inference providers are now a pluggable surface (salvage of #14424); see the provider sketch after this list ([#20324](https://github.com/NousResearch/hermes-agent/pull/20324))
- **`list_picker_providers`** — credential-filtered picker (salvage #13561) ([#20298](https://github.com/NousResearch/hermes-agent/pull/20298))
- **Remove `/provider` alias for `/model`** ([#20358](https://github.com/NousResearch/hermes-agent/pull/20358))
- **Shared Hermes dotenv loader across CLI + plugins** (salvage #13660) ([#20281](https://github.com/NousResearch/hermes-agent/pull/20281))
- **Nous OAuth persisted across profiles via shared token store** ([#19712](https://github.com/NousResearch/hermes-agent/pull/19712))
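
With inference providers now a pluggable surface, a third-party backend is essentially a class dropped under `plugins/model-providers/`. The real ABC is defined in the PR; the sketch below only illustrates the shape such a provider profile might take, and every method and field name shown is an assumption rather than the actual interface.

```python
import os
from abc import ABC, abstractmethod
from dataclasses import dataclass

# Hypothetical interface for illustration; the real ProviderProfile ABC defines its own contract.
@dataclass
class ResolvedProvider:
    base_url: str
    api_key: str
    model: str

class ProviderProfile(ABC):
    """Sketch of what a pluggable inference-provider profile could look like."""

    name: str = "example"

    @abstractmethod
    def has_credentials(self) -> bool:
        """True when the operator has configured credentials for this provider."""

    @abstractmethod
    def resolve(self, model: str) -> ResolvedProvider:
        """Map a requested model id to a concrete endpoint plus credentials."""

class ExampleProvider(ProviderProfile):
    name = "example"

    def has_credentials(self) -> bool:
        return bool(os.environ.get("EXAMPLE_API_KEY"))

    def resolve(self, model: str) -> ResolvedProvider:
        return ResolvedProvider(
            base_url="https://api.example.com/v1",
            api_key=os.environ["EXAMPLE_API_KEY"],
            model=model,
        )
```

A credential-filtered picker along the lines of `list_picker_providers` would then only surface profiles whose `has_credentials()` reports usable keys.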
#### New models
|
||||
- `deepseek/deepseek-v4-pro` added to OpenRouter + Nous Portal ([#20495](https://github.com/NousResearch/hermes-agent/pull/20495))
|
||||
- `x-ai/grok-4.3` added to OpenRouter + Nous Portal ([#20497](https://github.com/NousResearch/hermes-agent/pull/20497))
|
||||
- `openrouter/owl-alpha` (free tier) added to curated OpenRouter list ([#18071](https://github.com/NousResearch/hermes-agent/pull/18071))
|
||||
- `tencent/hy3-preview` paid route on OpenRouter (@Contentment003111) ([#21077](https://github.com/NousResearch/hermes-agent/pull/21077))
|
||||
- Arcee Trinity Large Thinking — temperature + compression overrides ([#20473](https://github.com/NousResearch/hermes-agent/pull/20473))
|
||||
- Rename `x-ai/grok-4.20-beta` to `x-ai/grok-4.20` ([#19640](https://github.com/NousResearch/hermes-agent/pull/19640))
|
||||
- Demote Vercel AI Gateway to bottom of provider picker ([#18112](https://github.com/NousResearch/hermes-agent/pull/18112))
|
||||
|
||||
#### Provider configuration
- **OpenRouter — response caching support** (@kshitijk4poor) ([#19132](https://github.com/NousResearch/hermes-agent/pull/19132)); see the request-shape sketch after this list
- **`image_gen.model` from config.yaml honored** (salvage #19376) ([#21273](https://github.com/NousResearch/hermes-agent/pull/21273))
- Fix: honor runtime default model during delegate provider resolution (@johnncenae) ([#17587](https://github.com/NousResearch/hermes-agent/pull/17587))
- Fix: avoid Bedrock credential probe in provider picker (@helix4u) ([#18998](https://github.com/NousResearch/hermes-agent/pull/18998))
- Fix: drop stale env-var override of persisted provider for cron ([#19627](https://github.com/NousResearch/hermes-agent/pull/19627))
- Fix: auxiliary curator api_key/base_url into runtime resolution ([#19421](https://github.com/NousResearch/hermes-agent/pull/19421))
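
On the response-caching item at the top of this list: how hermes-agent wires the toggle is in the PR, but OpenRouter's explicit cache control is generally expressed as Anthropic-style `cache_control` markers on message content parts, for models that expose prompt caching. The request below is a hedged illustration of that shape, not a transcript of what the agent sends.

```python
import json
import os
import urllib.request

# Illustrative OpenRouter chat request with an explicit cache_control marker on the
# (large, rarely-changing) system prompt; not the agent's actual request code.
payload = {
    "model": "anthropic/claude-3.7-sonnet",
    "messages": [
        {
            "role": "system",
            "content": [
                {
                    "type": "text",
                    "text": "Very long, rarely-changing system prompt goes here...",
                    "cache_control": {"type": "ephemeral"},
                }
            ],
        },
        {"role": "user", "content": "What changed in the last release?"},
    ],
}

request = urllib.request.Request(
    "https://openrouter.ai/api/v1/chat/completions",
    data=json.dumps(payload).encode(),
    headers={
        "Authorization": f"Bearer {os.environ['OPENROUTER_API_KEY']}",
        "Content-Type": "application/json",
    },
)
with urllib.request.urlopen(request) as response:
    print(json.load(response)["choices"][0]["message"]["content"])
```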
### Agent Loop & Conversation
|
||||
- **`video_analyze` — native video understanding tool** (@alt-glitch) ([#19301](https://github.com/NousResearch/hermes-agent/pull/19301))
|
||||
- **Show context compression count in status bar** (CLI + TUI) ([#21218](https://github.com/NousResearch/hermes-agent/pull/21218))
|
||||
- **Isolate `get_tool_definitions` quiet_mode cache + dedup LCM injection** (#17335) ([#17889](https://github.com/NousResearch/hermes-agent/pull/17889))
|
||||
- Fix: warning-first tool-call loop guardrails ([#18227](https://github.com/NousResearch/hermes-agent/pull/18227))
|
||||
- Fix: break permanent empty-response loop from orphan tool-tail ([#21385](https://github.com/NousResearch/hermes-agent/pull/21385))
|
||||
- Fix: propagate ContextVars to concurrent tool worker threads (salvage #16660) ([#18123](https://github.com/NousResearch/hermes-agent/pull/18123))
|
||||
- Fix: surface self-improvement review summaries across CLI, TUI, and gateway ([#18073](https://github.com/NousResearch/hermes-agent/pull/18073))
|
||||
- Fix: serialize concurrent `hermes_tools` RPC calls from `execute_code` ([#17894](https://github.com/NousResearch/hermes-agent/pull/17894), [#17902](https://github.com/NousResearch/hermes-agent/pull/17902))
|
||||
- Fix: include system prompt + tool schemas in token estimates for compression ([#18265](https://github.com/NousResearch/hermes-agent/pull/18265))
|
||||
|
||||
### Compression
|
||||
- Fix: skip non-string tool content in dedup pass to prevent AttributeError ([#19398](https://github.com/NousResearch/hermes-agent/pull/19398))
|
||||
- Fix: reset `_summary_failure_cooldown_until` on session reset ([#19622](https://github.com/NousResearch/hermes-agent/pull/19622))
|
||||
- Fix: trigger fallback on timeout errors alongside model-unavailable errors ([#19665](https://github.com/NousResearch/hermes-agent/pull/19665))
|
||||
- Fix: `_prune_old_tool_results` boundary direction ([#19725](https://github.com/NousResearch/hermes-agent/pull/19725))
|
||||
- Fix: soften summary prompt for content filters (salvage #19456) ([#21302](https://github.com/NousResearch/hermes-agent/pull/21302))
|
||||
|
||||
### Delegate
|
||||
- Fix: inherit parent fallback_chain in `_build_child_agent` ([#19601](https://github.com/NousResearch/hermes-agent/pull/19601))
|
||||
- Fix: guard `_load_config()` against `delegation: null` in config.yaml ([#19662](https://github.com/NousResearch/hermes-agent/pull/19662))
|
||||
- Fix: inherit parent api_key when `delegation.base_url` set without `delegation.api_key` ([#19741](https://github.com/NousResearch/hermes-agent/pull/19741))
|
||||
- Fix: expand composite toolsets before intersection (salvage #19455) ([#21300](https://github.com/NousResearch/hermes-agent/pull/21300))
|
||||
- Fix: correct ACP docs — Claude Code CLI has no --acp flag (salvage #19058) ([#21201](https://github.com/NousResearch/hermes-agent/pull/21201))
|
||||
|
||||
### Session & Memory
|
||||
- **Hindsight — probe API for `update_mode='append'` to dedupe across processes** (@nicoloboschi) ([#20222](https://github.com/NousResearch/hermes-agent/pull/20222))
|
||||
|
||||
### Curator
|
||||
- **`hermes curator archive` and `prune` subcommands** ([#20200](https://github.com/NousResearch/hermes-agent/pull/20200))
|
||||
- **`hermes curator list-archived`** (#20651) ([#21236](https://github.com/NousResearch/hermes-agent/pull/21236))
|
||||
- **Synchronous manual `hermes curator run`** (#20555) ([#21216](https://github.com/NousResearch/hermes-agent/pull/21216))
|
||||
- Fix: preserve `last_report_path` in state ([#18169](https://github.com/NousResearch/hermes-agent/pull/18169))
|
||||
- Fix: rewrite cron job skill refs after consolidation ([#18253](https://github.com/NousResearch/hermes-agent/pull/18253))
|
||||
- Fix: defer first run + `--dry-run` preview (#18373) ([#18389](https://github.com/NousResearch/hermes-agent/pull/18389))
|
||||
- Fix: authoritative `absorbed_into` on delete + restore cron skill links on rollback (#18671) ([#18731](https://github.com/NousResearch/hermes-agent/pull/18731))
|
||||
- Fix: prevent false-positive consolidation from substring matching ([#19573](https://github.com/NousResearch/hermes-agent/pull/19573))
|
||||
- Fix: only mark agent-created for background-review sediment ([#19621](https://github.com/NousResearch/hermes-agent/pull/19621))
|
||||
- Fix: protect hub skills by frontmatter name ([#20194](https://github.com/NousResearch/hermes-agent/pull/20194))
|
||||
|
||||
---
|
||||
|
||||
## 🔧 Tool System
|
||||
|
||||
### File tools
- **Post-write delta lint on `write_file` + `patch`** — in-proc linters for Python, JSON, YAML, TOML ([#20191](https://github.com/NousResearch/hermes-agent/pull/20191))
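
The idea behind the post-write delta lint is cheap in-process validation right after `write_file` or `patch` touches a file: parse the new content with the standard library and surface syntax errors immediately instead of letting the next tool call fail. The sketch below shows that idea for Python/JSON/TOML; it is an illustration of the approach, not the shipped linter.

```python
import ast
import json
import tomllib  # Python 3.11+
from pathlib import Path

def delta_lint(path: str) -> list[str]:
    """Return human-readable syntax problems for a freshly written file, if any."""
    text = Path(path).read_text(encoding="utf-8")
    problems: list[str] = []
    try:
        if path.endswith(".py"):
            ast.parse(text, filename=path)
        elif path.endswith(".json"):
            json.loads(text)
        elif path.endswith(".toml"):
            tomllib.loads(text)
        # YAML needs a third-party parser (e.g. PyYAML), so it is omitted from this sketch.
    except (SyntaxError, ValueError) as exc:
        problems.append(f"{path}: {exc}")
    return problems
```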
### Cron
|
||||
- **`no_agent` mode — script-only cron jobs (watchdog pattern)** ([#19709](https://github.com/NousResearch/hermes-agent/pull/19709))
|
||||
- **`context_from` chaining docs** (salvage #15724) ([#20394](https://github.com/NousResearch/hermes-agent/pull/20394))
|
||||
- Fix: treat non-dict origin as missing instead of crashing tick ([#19283](https://github.com/NousResearch/hermes-agent/pull/19283))
|
||||
- Fix: bump skill usage when cron jobs load skills ([#19433](https://github.com/NousResearch/hermes-agent/pull/19433))
|
||||
- Fix: recover null `next_run_at` jobs ([#19576](https://github.com/NousResearch/hermes-agent/pull/19576))
|
||||
- Fix: skip AI call when prerun script produces no output ([#19628](https://github.com/NousResearch/hermes-agent/pull/19628))
|
||||
- Fix: expand config.yaml refs during job execution ([#19872](https://github.com/NousResearch/hermes-agent/pull/19872))
|
||||
- Fix: serialize `get_due_jobs` writes to prevent parallel state corruption ([#19874](https://github.com/NousResearch/hermes-agent/pull/19874))
|
||||
- Fix: initialize MCP servers before constructing the cron AIAgent ([#21354](https://github.com/NousResearch/hermes-agent/pull/21354))
|
||||
|
||||
### MCP
|
||||
- **SSE transport support** (salvage #19135) ([#21227](https://github.com/NousResearch/hermes-agent/pull/21227))
|
||||
- **Forward OAuth auth + bump `sse_read_timeout` on SSE transport** ([#21323](https://github.com/NousResearch/hermes-agent/pull/21323))
|
||||
- **Retry stale pipe transport failures as session-expired** ([#21289](https://github.com/NousResearch/hermes-agent/pull/21289))
|
||||
- **Surface image tool results as MEDIA tags instead of dropping them** ([#21328](https://github.com/NousResearch/hermes-agent/pull/21328))
|
||||
- **Periodic keepalive to `_wait_for_lifecycle_event`** (salvage #17016) ([#20209](https://github.com/NousResearch/hermes-agent/pull/20209))
|
||||
- Fix: reconnect on terminated sessions ([#19380](https://github.com/NousResearch/hermes-agent/pull/19380))
|
||||
- Fix: decouple AnyUrl import from mcp dependency ([#19695](https://github.com/NousResearch/hermes-agent/pull/19695))
|
||||
- Fix: `mcp add --command` gets distinct argparse dest ([#21204](https://github.com/NousResearch/hermes-agent/pull/21204))
|
||||
- Fix: clear stale thread interrupt before MCP discovery ([#21276](https://github.com/NousResearch/hermes-agent/pull/21276))
|
||||
- Fix: report configured timeout in MCP call errors ([#21281](https://github.com/NousResearch/hermes-agent/pull/21281))
|
||||
- Fix: include exception type in error messages when str(exc) is empty (salvage #19425) ([#21292](https://github.com/NousResearch/hermes-agent/pull/21292))
|
||||
- Fix: re-raise CancelledError explicitly in `MCPServerTask.run` ([#21318](https://github.com/NousResearch/hermes-agent/pull/21318))
|
||||
- Fix: coerce numeric tool args defensively in `mcp_serve` ([#21329](https://github.com/NousResearch/hermes-agent/pull/21329))
|
||||
- Fix: gate utility stubs on server-advertised capabilities ([#21347](https://github.com/NousResearch/hermes-agent/pull/21347))
|
||||
|
||||
### Browser
|
||||
- Fix: allow explicit CDP override without local agent-browser ([#19670](https://github.com/NousResearch/hermes-agent/pull/19670))
|
||||
- Fix: inject `--no-sandbox` for root + AppArmor userns restrictions ([#19747](https://github.com/NousResearch/hermes-agent/pull/19747))
|
||||
- Fix: tighten Lightpanda fallback edge cases (@kshitijk4poor) ([#20672](https://github.com/NousResearch/hermes-agent/pull/20672))
|
||||
|
||||
### Web tools
|
||||
- **Per-capability backend selection — search/extract split** (@kshitijk4poor) ([#20061](https://github.com/NousResearch/hermes-agent/pull/20061))
|
||||
- **SearXNG native search-only backend** (@kshitijk4poor) ([#20823](https://github.com/NousResearch/hermes-agent/pull/20823))
|
||||
|
||||
### Approval / Tool gating
|
||||
- Fix: wake blocked gateway approvals on session cleanup ([#18171](https://github.com/NousResearch/hermes-agent/pull/18171))
|
||||
- Fix: harden YOLO mode env parsing against quoted-bool strings ([#18214](https://github.com/NousResearch/hermes-agent/pull/18214))
|
||||
- Fix: extend sensitive write target to cover shell RC and credential files ([#19282](https://github.com/NousResearch/hermes-agent/pull/19282))
|
||||
|
||||
---
|
||||
|
||||
## 🔌 Plugin System
- **`transform_llm_output` plugin hook** (salvage of #20813) ([#21235](https://github.com/NousResearch/hermes-agent/pull/21235)); see the hook sketch after this list
- **Document `env_enablement_fn` + `cron_deliver_env_var` platform-plugin hooks** ([#21331](https://github.com/NousResearch/hermes-agent/pull/21331))
- **Pluggable surfaces coverage — model-provider guide, full plugin map, opt-in fix** ([#20749](https://github.com/NousResearch/hermes-agent/pull/20749))
- **Plugin-authoring gaps — image-gen provider guide + publishing a skill tap** ([#20800](https://github.com/NousResearch/hermes-agent/pull/20800))
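
The hook name `transform_llm_output` comes straight from the release notes; the class shape and registration shown below are assumptions made for illustration. The sketch shows the kind of plugin the hook enables: trimming or filtering what the model produced before it reaches the conversation.

```python
# Hypothetical plugin sketch: the hook name `transform_llm_output` is from the release
# notes, but the class and wiring shown here are assumed, not the real plugin API.
class OutputTrimmerPlugin:
    """Example context-window reducer: cap each LLM reply at a fixed length."""

    name = "output-trimmer"
    max_chars = 8_000

    def transform_llm_output(self, text: str, context: dict) -> str:
        # Drop anything past the cap and leave a marker so the truncation is visible.
        if len(text) <= self.max_chars:
            return text
        return text[: self.max_chars] + "\n\n[output truncated by output-trimmer plugin]"
```

A content filter would follow the same pattern, returning a rewritten or redacted string instead of a truncated one.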
---
|
||||
|
||||
## 🧩 Skills Ecosystem
|
||||
|
||||
### New optional skills
|
||||
- **Shopify** — Admin + Storefront GraphQL optional skill ([#18116](https://github.com/NousResearch/hermes-agent/pull/18116))
|
||||
- **here.now** — optional skill ([#18170](https://github.com/NousResearch/hermes-agent/pull/18170))
|
||||
- **shop-app** — personal shopping assistant (optional) ([#20702](https://github.com/NousResearch/hermes-agent/pull/20702))
|
||||
- **Anthropic financial-services bundle** — ported as optional finance skills ([#21180](https://github.com/NousResearch/hermes-agent/pull/21180))
|
||||
- **kanban-video-orchestrator** — creative optional skill (@SHL0MS) ([#19281](https://github.com/NousResearch/hermes-agent/pull/19281))
|
||||
- **searxng-search** — optional skill + Web Search + Extract docs page (@kshitijk4poor) ([#20841](https://github.com/NousResearch/hermes-agent/pull/20841), [#20844](https://github.com/NousResearch/hermes-agent/pull/20844))
|
||||
|
||||
### Skill UX
|
||||
- **Linear skill — add Documents support + Python helper script** ([#20752](https://github.com/NousResearch/hermes-agent/pull/20752))
|
||||
- **Modernize Obsidian skill to use file tools** (salvage #19332) ([#20413](https://github.com/NousResearch/hermes-agent/pull/20413))
|
||||
- **Default custom tool creation to plugins** (@kshitijk4poor) ([#19755](https://github.com/NousResearch/hermes-agent/pull/19755))
|
||||
- **skill_commands cache — rescan on platform scope changes** (salvage #14570 by @LeonSGP43) ([#18739](https://github.com/NousResearch/hermes-agent/pull/18739))
|
||||
- **Skills — additional rescan paths in skill_commands cache** (salvage #19042) ([#21181](https://github.com/NousResearch/hermes-agent/pull/21181))
|
||||
- Fix: regression tests for non-dict metadata in `extract_skill_conditions` ([#18213](https://github.com/NousResearch/hermes-agent/pull/18213))
|
||||
- Docs: explain restoring bundled skills (salvage #19254) ([#20404](https://github.com/NousResearch/hermes-agent/pull/20404))
|
||||
- Docs: document `hermes skills reset` subcommand (salvage #11544) ([#20395](https://github.com/NousResearch/hermes-agent/pull/20395))
|
||||
- Docs: himalaya v1.2.0 `folder.aliases` syntax ([#19882](https://github.com/NousResearch/hermes-agent/pull/19882))
|
||||
- Point agent at `hermes-agent` skill + docs site sync ([#20390](https://github.com/NousResearch/hermes-agent/pull/20390))
|
||||
|
||||
---
|
||||
|
||||
## 🖥️ CLI & User Experience
|
||||
|
||||
### CLI
|
||||
- **`/new` accepts optional session name argument** (salvage of #19555) ([#19637](https://github.com/NousResearch/hermes-agent/pull/19637))
|
||||
- **100 new CLI startup tips** ([#20168](https://github.com/NousResearch/hermes-agent/pull/20168))
|
||||
- **`display.language` — static message translation** (zh/ja/de/es) ([#20231](https://github.com/NousResearch/hermes-agent/pull/20231))
|
||||
- **French (fr) locale** (@Foolafroos) ([#20329](https://github.com/NousResearch/hermes-agent/pull/20329))
|
||||
- **Ukrainian (uk) locale** ([#20467](https://github.com/NousResearch/hermes-agent/pull/20467))
|
||||
- **Turkish (tr) locale** ([#20474](https://github.com/NousResearch/hermes-agent/pull/20474))
|
||||
- Fix: recover classic CLI output after resize (@helix4u) ([#20444](https://github.com/NousResearch/hermes-agent/pull/20444))
|
||||
- Fix: complete absolute paths as paths (@helix4u) ([#19930](https://github.com/NousResearch/hermes-agent/pull/19930))
|
||||
- Fix: resolve lazy session creation regressions (#18370 fallout) (@alt-glitch) ([#20363](https://github.com/NousResearch/hermes-agent/pull/20363))
|
||||
- Fix: local backend CLI always uses launch directory (@alt-glitch) ([#19334](https://github.com/NousResearch/hermes-agent/pull/19334))
|
||||
- Refactor: drop dead c-S-c key binding (follow-up to #19895) ([#19919](https://github.com/NousResearch/hermes-agent/pull/19919))
|
||||
|
||||
### TUI (Ink)
|
||||
- **`/model` picker overhaul to match `hermes model` with inline auth** (@austinpickett) ([#18117](https://github.com/NousResearch/hermes-agent/pull/18117))
|
||||
- **Collapsible sections in startup banner** — skills, system prompt, MCP (@kshitijk4poor) ([#20625](https://github.com/NousResearch/hermes-agent/pull/20625))
|
||||
- **Show context compression count in status bar** ([#21218](https://github.com/NousResearch/hermes-agent/pull/21218))
|
||||
- Perf: reduce overlay render churn with focused selectors (@OutThisLife) ([#20393](https://github.com/NousResearch/hermes-agent/pull/20393))
|
||||
- Fix: restore voice push-to-talk parity (salvage of #16189 by @Montbra) (@OutThisLife) ([#20897](https://github.com/NousResearch/hermes-agent/pull/20897))
|
||||
- Fix: kanban button (@austinpickett) ([#18358](https://github.com/NousResearch/hermes-agent/pull/18358))
|
||||
|
||||
### Dashboard
|
||||
- **Plugins page — manage, enable/disable, auth status** (@austinpickett) ([#18095](https://github.com/NousResearch/hermes-agent/pull/18095))
|
||||
- **Profiles management page** (@vincez-hms-coder) ([#16419](https://github.com/NousResearch/hermes-agent/pull/16419))
|
||||
- **Interactive column sorting in analytics tables** ([#18192](https://github.com/NousResearch/hermes-agent/pull/18192))
|
||||
- **`default-large` built-in theme with 18px base size** ([#20820](https://github.com/NousResearch/hermes-agent/pull/20820))
|
||||
- **Support serving under URL prefix via `X-Forwarded-Prefix`** (salvage #19450) ([#21296](https://github.com/NousResearch/hermes-agent/pull/21296))
|
||||
- **Launch dashboard as side-process via `HERMES_DASHBOARD=1` in Docker** (@benbarclay) ([#19540](https://github.com/NousResearch/hermes-agent/pull/19540))
|
||||
- Fix: dashboard theme layout shift (@AllardQuek) ([#17232](https://github.com/NousResearch/hermes-agent/pull/17232))
|
||||
- Fix: gateway model picker current context (@helix4u) ([#20513](https://github.com/NousResearch/hermes-agent/pull/20513))
|
||||
|
||||
### Update + setup
|
||||
- **`hermes update --yes/-y` to skip interactive prompts** ([#18261](https://github.com/NousResearch/hermes-agent/pull/18261))
|
||||
- **Restart manual profile gateways after update** ([#18178](https://github.com/NousResearch/hermes-agent/pull/18178))
|
||||
|
||||
### Profiles
|
||||
- **`--no-skills` flag for empty profile creation** ([#20986](https://github.com/NousResearch/hermes-agent/pull/20986))
|
||||
|
||||
---
|
||||
|
||||
## 🎵 Voice, Image & Media
|
||||
|
||||
- **xAI Custom Voices — voice cloning** (@alt-glitch) ([#18776](https://github.com/NousResearch/hermes-agent/pull/18776))
|
||||
- **Achievements — share card render on unlocked badges** ([#19657](https://github.com/NousResearch/hermes-agent/pull/19657))
|
||||
- **Refresh systemd unit on gateway boot (not just start/restart)** (@alt-glitch) ([#19684](https://github.com/NousResearch/hermes-agent/pull/19684))
|
||||
|
||||
---
|
||||
|
||||
## 🔗 API Server & Remote Access
- **`X-Hermes-Session-Key` header for long-term memory scoping** (closes #20060) ([#20199](https://github.com/NousResearch/hermes-agent/pull/20199))
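
Scoping long-term memory per caller is a one-header change on the client side. The header name `X-Hermes-Session-Key` is the one introduced in the PR; the endpoint path, port, and model id below are assumptions for illustration, and auth headers are omitted.

```python
import json
import urllib.request

def ask(prompt: str, session_key: str, base_url: str = "http://localhost:8000") -> str:
    """Send a chat request whose long-term memory scope is set by X-Hermes-Session-Key."""
    request = urllib.request.Request(
        f"{base_url}/v1/chat/completions",  # assumed OpenAI-compatible path
        data=json.dumps({
            "model": "hermes",
            "messages": [{"role": "user", "content": prompt}],
        }).encode(),
        headers={
            "Content-Type": "application/json",
            "X-Hermes-Session-Key": session_key,  # distinct keys get distinct memory scopes
            # add auth headers here if your deployment requires them
        },
    )
    with urllib.request.urlopen(request) as response:
        return json.load(response)["choices"][0]["message"]["content"]

# Two callers with different keys keep separate memories:
# ask("Remember my name is Ada.", session_key="alice")
# ask("What's my name?", session_key="bob")  # should not know about Ada
```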
---
|
||||
|
||||
## 🧰 ACP Adapter (VS Code / Zed / JetBrains)
|
||||
|
||||
- **`/steer` and `/queue` slash commands** (@HenkDz) ([#18114](https://github.com/NousResearch/hermes-agent/pull/18114))
|
||||
- Fix: translate Windows cwd for WSL sessions (salvage #18128) ([#18233](https://github.com/NousResearch/hermes-agent/pull/18233))
|
||||
- Fix: run `/steer` as a regular prompt on idle sessions ([#18258](https://github.com/NousResearch/hermes-agent/pull/18258))
|
||||
- Fix: route Zed thoughts to reasoning + polish tool/context rendering ([#19139](https://github.com/NousResearch/hermes-agent/pull/19139))
|
||||
- Fix: atomic session persistence via `replace_messages` (salvage #13675) ([#20279](https://github.com/NousResearch/hermes-agent/pull/20279))
|
||||
- Fix: preserve assistant reasoning metadata in session persistence (salvage #13575) ([#20296](https://github.com/NousResearch/hermes-agent/pull/20296))
|
||||
- Docs: update VS Code setup for ACP Client extension (salvage #12495) ([#20433](https://github.com/NousResearch/hermes-agent/pull/20433))
|
||||
|
||||
---
|
||||
|
||||
## 🐳 Docker
|
||||
|
||||
- **Launch dashboard as side-process via `HERMES_DASHBOARD=1`** (@benbarclay) ([#19540](https://github.com/NousResearch/hermes-agent/pull/19540))
|
||||
- **Refuse root gateway runs in official image** (salvage #19215) ([#21250](https://github.com/NousResearch/hermes-agent/pull/21250))
|
||||
- **Chown runtime `node_modules` trees to hermes user** (salvage #19303) ([#21267](https://github.com/NousResearch/hermes-agent/pull/21267))
|
||||
- Fix: exclude compose/profile runtime state from build context ([#19626](https://github.com/NousResearch/hermes-agent/pull/19626))
|
||||
- CI: don't cancel overlapping builds, guard `:latest` (@ethernet8023) ([#20890](https://github.com/NousResearch/hermes-agent/pull/20890))
|
||||
- Test: align Dockerfile contract tests with simplified TUI flow (salvage #19024) ([#21174](https://github.com/NousResearch/hermes-agent/pull/21174))
|
||||
- Docs: connect to local inference servers (vLLM, Ollama) (salvage #12335) ([#20407](https://github.com/NousResearch/hermes-agent/pull/20407))
|
||||
- Docs: document `API_SERVER_*` env vars (salvage #11758) ([#20409](https://github.com/NousResearch/hermes-agent/pull/20409))
|
||||
- Docs: clarify Docker terminal backend is a single persistent container ([#20003](https://github.com/NousResearch/hermes-agent/pull/20003))
|
||||
|
||||
---
|
||||
|
||||
## 🐛 Notable Bug Fixes
|
||||
|
||||
### Agent
|
||||
- Fix: recover lazy session creation regressions (#18370 fallout) (@alt-glitch) ([#20363](https://github.com/NousResearch/hermes-agent/pull/20363))
|
||||
- Fix: propagate ContextVars to concurrent tool worker threads (salvage #16660) ([#18123](https://github.com/NousResearch/hermes-agent/pull/18123))
|
||||
- Fix: warning-first tool-call loop guardrails ([#18227](https://github.com/NousResearch/hermes-agent/pull/18227))
|
||||
- Fix: surface self-improvement review summaries across CLI, TUI, and gateway ([#18073](https://github.com/NousResearch/hermes-agent/pull/18073))
|
||||
|
||||
### Gateway streaming
|
||||
- Fix: harden StreamingConfig bool and numeric coercion (@simbam99) ([#16463](https://github.com/NousResearch/hermes-agent/pull/16463))
|
||||
|
||||
### Model
|
||||
- Fix: avoid Bedrock credential probe in provider picker (@helix4u) ([#18998](https://github.com/NousResearch/hermes-agent/pull/18998))
|
||||
|
||||
### Doctor
|
||||
- Fix: check global agent-browser when local install not found ([#19671](https://github.com/NousResearch/hermes-agent/pull/19671))
|
||||
- Test: kimi-coding-cn provider validation regression ([#19734](https://github.com/NousResearch/hermes-agent/pull/19734))
|
||||
|
||||
### Update
|
||||
- Fix: patch `isatty` on real streams to fix xdist-flaky `--yes` tests (salvage #19026) ([#21175](https://github.com/NousResearch/hermes-agent/pull/21175))
|
||||
- Fix: teach restart-mocks about the post-update survivor sweep (salvage #19031) ([#21177](https://github.com/NousResearch/hermes-agent/pull/21177))
|
||||
|
||||
### Auth
|
||||
- Fix: acp preserve assistant reasoning metadata ([#20296](https://github.com/NousResearch/hermes-agent/pull/20296))
|
||||
|
||||
### Redact
|
||||
- Fix: add `code_file` param to skip false-positive ENV/JSON patterns ([#19715](https://github.com/NousResearch/hermes-agent/pull/19715))
|
||||
|
||||
### Email
|
||||
- Fix: quoted-relative file-drop paths + Date header on tool email path ([#19646](https://github.com/NousResearch/hermes-agent/pull/19646))
|
||||
|
||||
---
|
||||
|
||||
## 🧪 Testing
|
||||
|
||||
- **ACP — accept prompt persistence kwargs in MCP E2E mocks** (@stephenschoettler) ([#18047](https://github.com/NousResearch/hermes-agent/pull/18047))
|
||||
- **Toolsets — include kanban in expected post-#17805 toolset assertions** (@briandevans) ([#18122](https://github.com/NousResearch/hermes-agent/pull/18122))
|
||||
- **Agent — cover max-iterations summary message sanitization** ([#19580](https://github.com/NousResearch/hermes-agent/pull/19580))
|
||||
- **run_agent — `-inf` and `nan` regression coverage for `_coerce_number`** ([#19703](https://github.com/NousResearch/hermes-agent/pull/19703))
|
||||
|
||||
---
|
||||
|
||||
## 📚 Documentation
|
||||
|
||||
### Major docs additions
|
||||
- **`llms.txt` + `llms-full.txt` — agent-friendly ingestion** ([#18276](https://github.com/NousResearch/hermes-agent/pull/18276))
|
||||
- **User Stories and Use Cases collage page** ([#18282](https://github.com/NousResearch/hermes-agent/pull/18282))
|
||||
- **Persistent Goals (/goal) feature page** ([#18275](https://github.com/NousResearch/hermes-agent/pull/18275))
|
||||
- **Windows (WSL2) guide expansion** — filesystem, networking, services, pitfalls ([#20748](https://github.com/NousResearch/hermes-agent/pull/20748))
|
||||
- **Chinese (zh-CN) README translation** (salvage #13508) ([#20431](https://github.com/NousResearch/hermes-agent/pull/20431))
|
||||
- **zh-Hans Docusaurus locale** + Tool Gateway / image-gen / WSL quickstart translations (salvage #11728) ([#20430](https://github.com/NousResearch/hermes-agent/pull/20430))
|
||||
- **Tool Gateway docs restructure** — lead with what it does, config moved to bottom ([#20827](https://github.com/NousResearch/hermes-agent/pull/20827))
|
||||
- **Quickstart — Onchain AI Garage Hermes tutorials playlist** ([#20192](https://github.com/NousResearch/hermes-agent/pull/20192))
|
||||
- **Open WebUI bootstrap script** (salvage #9566) ([#20427](https://github.com/NousResearch/hermes-agent/pull/20427))
|
||||
- **Local Ollama setup guide** (salvage #5842) ([#20426](https://github.com/NousResearch/hermes-agent/pull/20426))
|
||||
- **Google Gemini guide** (salvage #17450) ([#20401](https://github.com/NousResearch/hermes-agent/pull/20401))
|
||||
- **Custom model aliases for /model command** ([#20475](https://github.com/NousResearch/hermes-agent/pull/20475))
|
||||
- **Together/Groq/Perplexity cookbook via `custom_providers`** (salvage #15214) ([#20400](https://github.com/NousResearch/hermes-agent/pull/20400))
|
||||
- **Doubao speech integration examples** (TTS + STT) (salvage #18065) ([#20418](https://github.com/NousResearch/hermes-agent/pull/20418))
|
||||
- **WSL-to-Windows Chrome MCP bridge** (salvage #8313) ([#20428](https://github.com/NousResearch/hermes-agent/pull/20428))
|
||||
- **Hermes skills docs sync** — slash commands + durable-systems section ([#20390](https://github.com/NousResearch/hermes-agent/pull/20390))
|
||||
- **AGENTS.md — curator/cron/delegation/toolsets + fix plugin tree** ([#20226](https://github.com/NousResearch/hermes-agent/pull/20226))
|
||||
- **Bedrock quickstart entry + fallback comment + deployment link** (salvage #11093) ([#20397](https://github.com/NousResearch/hermes-agent/pull/20397))
|
||||
|
||||
### Docs polish
|
||||
- Collapse exploding skills tree to a single Skills node ([#18259](https://github.com/NousResearch/hermes-agent/pull/18259))
|
||||
- Clarify `session_search` auxiliary model docs ([#19593](https://github.com/NousResearch/hermes-agent/pull/19593))
|
||||
- Open WebUI Quick Setup gap fill ([#19654](https://github.com/NousResearch/hermes-agent/pull/19654))
|
||||
- Default custom tool creation to plugins (@kshitijk4poor) ([#19755](https://github.com/NousResearch/hermes-agent/pull/19755))
|
||||
- Clarify Telegram group chat troubleshooting (salvage #18672) ([#20416](https://github.com/NousResearch/hermes-agent/pull/20416))
|
||||
- Codex OAuth auth prerequisite clarification (salvage #18688) ([#20417](https://github.com/NousResearch/hermes-agent/pull/20417))
|
||||
- Discord Server Members Intent + SSRC-mapping drift + /voice join slash Choice (salvage #11350) ([#20411](https://github.com/NousResearch/hermes-agent/pull/20411))
|
||||
- Document `ctx.dispatch_tool()` (salvage #10955) ([#20391](https://github.com/NousResearch/hermes-agent/pull/20391))
|
||||
- Document `hermes webhook subscribe --deliver-only` (salvage #12612) ([#20392](https://github.com/NousResearch/hermes-agent/pull/20392))
|
||||
- Document `hermes import` reference (salvage #14711) ([#20396](https://github.com/NousResearch/hermes-agent/pull/20396))
|
||||
- Document per-provider TTS `max_text_length` caps (salvage #13825) ([#20389](https://github.com/NousResearch/hermes-agent/pull/20389))
|
||||
- Clarify supported prompt customization surfaces (salvage #19987) ([#20383](https://github.com/NousResearch/hermes-agent/pull/20383))
|
||||
- Correct `web_extract` summarizer timeout comment (salvage #20051) ([#20381](https://github.com/NousResearch/hermes-agent/pull/20381))
|
||||
- Fix fallback provider config paths (salvage #20033) ([#20382](https://github.com/NousResearch/hermes-agent/pull/20382))
|
||||
- Fix misleading RL install-extras claim (salvage #19080) ([#21213](https://github.com/NousResearch/hermes-agent/pull/21213))
|
||||
- Clarify API server tool execution locality (salvage #19117) ([#21223](https://github.com/NousResearch/hermes-agent/pull/21223))
|
||||
- Prefer `.venv` to match AGENTS.md and scripts/run_tests.sh (@xxxigm) ([#21334](https://github.com/NousResearch/hermes-agent/pull/21334))
|
||||
- Align tool discovery + test runner with AGENTS.md (@xxxigm) ([#20791](https://github.com/NousResearch/hermes-agent/pull/20791))
|
||||
- Align terminal-backend count and naming across docs and code (salvage #19044) ([#20402](https://github.com/NousResearch/hermes-agent/pull/20402))
|
||||
- Refresh stale platform counts (salvage #19053) ([#20403](https://github.com/NousResearch/hermes-agent/pull/20403))
|
||||
|
||||
---
|
||||
|
||||
## 👥 Contributors
|
||||
|
||||
### Core
|
||||
- **@teknium1** — salvage, triage, review, feature work, and release management
|
||||
|
||||
### Top Community Contributors
|
||||
|
||||
- **@kshitijk4poor** (21 PRs) — SearXNG native search backend, per-capability backend selection, collapsible TUI startup banner, Slack ephemeral ack + format fixes, Lightpanda fallback hardening, searxng-search optional skill + Web Search + Extract docs, default custom tool creation to plugins, kanban failure-column fix
|
||||
- **@alt-glitch** (13 PRs) — video_analyze tool, xAI Custom Voices (voice cloning), local-backend CLI launch-directory fix, lazy-session creation regression recovery, systemd unit refresh on gateway boot
|
||||
- **@OutThisLife** (9 PRs) — TUI perf — overlay render churn reduction, voice push-to-talk parity restoration (salvaging @Montbra)
|
||||
- **@helix4u** (6 PRs) — Classic CLI output recovery after resize, absolute-path TUI completion, gateway model picker current-context fix, Bedrock credential probe avoidance, kanban docs fixes
|
||||
- **@ethernet8023** (3 PRs) — Docker CI — don't cancel overlapping builds, :latest guard
|
||||
- **@benbarclay** (3 PRs) — Docker — launch dashboard as side-process via HERMES_DASHBOARD=1
|
||||
- **@austinpickett** (3 PRs) — Dashboard Plugins page, TUI /model picker overhaul with inline auth, kanban button fix
- **@sprmn24** (2 PRs) — Contributor
- **@asheriif** (2 PRs) — Contributor
- **@xxxigm** (2 PRs) — Contributing docs — .venv preference and test runner alignment with AGENTS.md
|
||||
- **@stephenschoettler** (1 PR) — ACP — MCP E2E mock kwargs
|
||||
- **@vincez-hms-coder** (1 PR) — Dashboard — Profiles management page
|
||||
- **@cdanis** (1 PR) — Contributor
|
||||
- **@briandevans** (1 PR) — Toolsets test — kanban assertions post-#17805
|
||||
- **@heyitsaamir** (1 PR) — Contributor
|
||||
|
||||
### All Contributors
|
||||
|
||||
Thanks to everyone who contributed to v0.13.0 — commits, co-authored work, and salvaged PRs. 295 contributors in one week.
|
||||
|
||||
@0oAstro, @0xDevNinja, @0xharryriddle, @0xKingBack, @0xsir0000, @0xyg3n, @0z1-ghb, @abhinav11082001-stack,
|
||||
@acc001k, @acesjohnny, @adamludwin, @adybag14-cyber, @agentlinker, @agilejava, @ai-ag2026, @AJV20,
|
||||
@alanxchen85, @albert748, @AllardQuek, @alt-glitch, @altmazza0-star, @ambition0802, @amitgaur, @amroessam,
|
||||
@andrewhosf, @Asce66, @asheriif, @ashermorse, @asimons81, @Aslaaen, @Asunfly, @atongrun, @austinpickett,
|
||||
@banditburai, @barteqpl, @Bartok9, @Beandon13, @beardthelion, @beibi9966, @benbarclay, @binhnt92, @bjianhang,
|
||||
@BlackJulySnow, @bobashopcashier, @bogerman1, @Bongulielmi, @Brecht-H, @briandevans, @brooklynnicholson,
|
||||
@c3115644151, @camaragon, @CashWilliams, @CCClelo, @cdanis, @CES4751, @cg2aigc, @changchun989, @ChanlerDev,
|
||||
@CharlieKerfoot, @chengoak, @chenyunbo411, @chinadbo, @CIRWEL, @cixuuz, @cmcgrabby-hue, @colorcross,
|
||||
@Contentment003111, @CoreyNoDream, @counterposition, @curiouscleo, @DaniuXie, @deep-name, @dengtaoyuan450-a11y,
|
||||
@discodirector, @donramon77, @dpaluy, @ee-blog, @ehz0ah, @el-analista, @elmatadorgh, @EmelyanenkoK,
|
||||
@Emidomenge, @emozilla, @Es1la, @EthanGuo-coder, @etherman-os, @ethernet8023, @EvilDrag0n, @exxmen, @Fearvox,
|
||||
@Feranmi10, @firefly, @flobo3, @fmercurio, @Foolafroos, @formulahendry, @franksong2702, @ggnnggez, @GinWU05,
|
||||
@giwaov, @glesperance, @gnanirahulnutakki, @GodsBoy, @Gosuj, @Grey0202, @guillaumemeyer, @Gutslabs, @h0tp-ftw,
|
||||
@haidao1919, @halmisen, @happy5318, @hedirman, @helix4u, @hendrixfreire, @HenkDz, @hex-clawd, @heyitsaamir,
|
||||
@hharry11, @Hinotoi-agent, @holynn-q, @hrkzogw, @Hypn0sis, @Hypnus-Yuan, @ideathinklab01-source, @IMHaoyan,
|
||||
@Interstellar-code, @ishardo, @jacdevos, @jackey8616, @JanCong, @jasonoutland, @jatingodnani, @JayGwod,
|
||||
@jethac, @JezzaHehn, @JiaDe-Wu, @jjjojoj, @jkausel-ai, @John-tip, @johnncenae, @jrusso1020, @jslizar,
|
||||
@JTroyerOvermatch, @julysir, @Junass1, @JustinUssuri, @Kailigithub, @keepcalmqqf, @kiala9, @konsisumer,
|
||||
@kowenhaoai, @Krionex, @kshitijk4poor, @kyan12, @leavrcn, @leon7609, @LeonSGP43, @leprincep35700, @lhysdl,
@likejudy, @lisanhu, @liu-collab, @liuguangyong93, @liuhao1024, @LucianoSP, @luoyuctl, @luyao618, @M3RCUR2Y,
@maciekczech, @Magicray1217, @magicray1217, @MaHaoHao-ch, @malaiwah, @manateelazycat, @masonjames, @megastary,
@memosr, @MichaelWDanko, @mikeyobrien, @millerc79, @Mind-Dragon, @mioimotoai-lgtm, @misery-hl, @molvikar,
@momowind, @Montbra, @MottledShadow, @mrbob-git, @mrcharlesiv, @mrcoferland, @ms-alan, @mwnickerson,
@nazirulhafiy, @nftpoetrist, @nicoloboschi, @nightq, @nikolay-bratanov, @NikolayGusev-astra, @nocturnum91,
@noOne-list, @nouseman666, @novax635, @npmisantosh, @nudiltoys-cmyk, @olisikh, @oluwadareab12, @Oxidane-bot,
@pama0227, @pander, @pasevin, @paul-tian, @pdonizete, @perlowja, @pingchesu, @PratikRai0101, @priveperfumes,
@probepark, @QifengKuang, @quocanh261997, @qWaitCrypto, @qxxaa, @r266-tech, @rames-jusso, @revaraver,
@Ricardo-M-L, @rob-maron, @Roy-oss1, @rxdxxxx, @SandroHub013, @Sanjays2402, @Sertug17, @shashwatgokhe,
@shellybotmoyer, @SHL0MS, @SimbaKingjoe, @simbam99, @simplenamebox-ops, @socrates1024, @sonic-netizen,
@sprmn24, @steezkelly, @stephen0110, @stephenschoettler, @stevenchanin, @stevenchouai, @stormhierta,
@subtract0, @suncokret12, @swithek, @taeng0204, @TakeshiSawaguchi, @tangyuanjc, @TheEpTic, @thelumiereguy,
@Tkander1715, @tmdgusya, @Tranquil-Flow, @TruaShamu, @UgwujaGeorge, @valda, @vincez-hms-coder, @VinVC,
@vominh1919, @wabrent, @WadydX, @wanazhar, @WanderWang, @warabe1122, @web-dev0521, @WideLee, @willy-scr,
@wmagev, @WuTianyi123, @wxst, @wysie, @Wysie, @xsfX20, @xxxigm, @xyiy001, @YanzhongSu, @ygd58, @Yoimex,
@yuehei, @Yukipukii1, @yuqianma, @YX234, @zeejaytan, @zhanggttry, @zhao0112, @zng8418, @zons-zhaozhy, @Zyproth

---

**Full Changelog**: [v2026.4.30...v2026.5.7](https://github.com/NousResearch/hermes-agent/compare/v2026.4.30...v2026.5.7)

@@ -1,290 +0,0 @@

# Hermes Agent v0.7.0 (v2026.4.3)

**Release Date:** April 3, 2026

> The resilience release — pluggable memory providers, credential pool rotation, Camofox anti-detection browser, inline diff previews, gateway hardening across race conditions and approval routing, and deep security fixes across 168 PRs and 46 resolved issues.

---

## ✨ Highlights

- **Pluggable Memory Provider Interface** — Memory is now an extensible plugin system. Third-party memory backends (Honcho, vector stores, custom DBs) implement a simple provider ABC and register via the plugin system. Built-in memory is the default provider. Honcho integration restored to full parity as the reference plugin with profile-scoped host/peer resolution. ([#4623](https://github.com/NousResearch/hermes-agent/pull/4623), [#4616](https://github.com/NousResearch/hermes-agent/pull/4616), [#4355](https://github.com/NousResearch/hermes-agent/pull/4355))

- **Same-Provider Credential Pools** — Configure multiple API keys for the same provider with automatic rotation. Thread-safe `least_used` strategy distributes load across keys, and 401 failures trigger automatic rotation to the next credential. Set up via the setup wizard or `credential_pool` config; a hedged config sketch follows this list. ([#4188](https://github.com/NousResearch/hermes-agent/pull/4188), [#4300](https://github.com/NousResearch/hermes-agent/pull/4300), [#4361](https://github.com/NousResearch/hermes-agent/pull/4361))

- **Camofox Anti-Detection Browser Backend** — New local browser backend using Camoufox for stealth browsing. Persistent sessions with VNC URL discovery for visual debugging, configurable SSRF bypass for local backends, auto-install via `hermes tools`. ([#4008](https://github.com/NousResearch/hermes-agent/pull/4008), [#4419](https://github.com/NousResearch/hermes-agent/pull/4419), [#4292](https://github.com/NousResearch/hermes-agent/pull/4292))

- **Inline Diff Previews** — File write and patch operations now show inline diffs in the tool activity feed, giving you visual confirmation of what changed before the agent moves on. ([#4411](https://github.com/NousResearch/hermes-agent/pull/4411), [#4423](https://github.com/NousResearch/hermes-agent/pull/4423))

- **API Server Session Continuity & Tool Streaming** — The API server (Open WebUI integration) now streams tool progress events in real-time and supports `X-Hermes-Session-Id` headers for persistent sessions across requests. Sessions persist to the shared SessionDB. ([#4092](https://github.com/NousResearch/hermes-agent/pull/4092), [#4478](https://github.com/NousResearch/hermes-agent/pull/4478), [#4802](https://github.com/NousResearch/hermes-agent/pull/4802))

- **ACP: Client-Provided MCP Servers** — Editor integrations (VS Code, Zed, JetBrains) can now register their own MCP servers, which Hermes picks up as additional agent tools. Your editor's MCP ecosystem flows directly into the agent. ([#4705](https://github.com/NousResearch/hermes-agent/pull/4705))

- **Gateway Hardening** — Major stability pass across race conditions, photo media delivery, flood control, stuck sessions, approval routing, and compression death spirals. The gateway is substantially more reliable in production. ([#4727](https://github.com/NousResearch/hermes-agent/pull/4727), [#4750](https://github.com/NousResearch/hermes-agent/pull/4750), [#4798](https://github.com/NousResearch/hermes-agent/pull/4798), [#4557](https://github.com/NousResearch/hermes-agent/pull/4557))

- **Security: Secret Exfiltration Blocking** — Browser URLs and LLM responses are now scanned for secret patterns, blocking exfiltration attempts via URL encoding, base64, or prompt injection. Credential directory protections expanded to `.docker`, `.azure`, `.config/gh`. Execute_code sandbox output is redacted. ([#4483](https://github.com/NousResearch/hermes-agent/pull/4483), [#4360](https://github.com/NousResearch/hermes-agent/pull/4360), [#4305](https://github.com/NousResearch/hermes-agent/pull/4305), [#4327](https://github.com/NousResearch/hermes-agent/pull/4327))
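For readers wiring up credential pools, here is a minimal sketch of what the configuration could look like. The `credential_pool` key and the `least_used` strategy are named in the notes above; the nesting, other key names, and placeholder values below are assumptions, so treat the setup wizard as the authoritative path.

```yaml
# Hypothetical config.yaml excerpt: the nesting and key names are assumptions.
# Multiple keys for one provider rotate with the thread-safe least_used
# strategy, and a 401 on one key rotates to the next credential.
credential_pool:
  openrouter:
    strategy: least_used
    keys:
      - sk-or-v1-aaaaaaaaaaaaaaaa   # placeholder key
      - sk-or-v1-bbbbbbbbbbbbbbbb   # placeholder key
```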
---

## 🏗️ Core Agent & Architecture

### Provider & Model Support

- **Same-provider credential pools** — configure multiple API keys with automatic `least_used` rotation and 401 failover ([#4188](https://github.com/NousResearch/hermes-agent/pull/4188), [#4300](https://github.com/NousResearch/hermes-agent/pull/4300))
- **Credential pool preserved through smart routing** — pool state survives fallback provider switches and defers eager fallback on 429 ([#4361](https://github.com/NousResearch/hermes-agent/pull/4361))
- **Per-turn primary runtime restoration** — after fallback provider use, the agent automatically restores the primary provider on the next turn with transport recovery ([#4624](https://github.com/NousResearch/hermes-agent/pull/4624))
- **`developer` role for GPT-5 and Codex models** — uses OpenAI's recommended system message role for newer models ([#4498](https://github.com/NousResearch/hermes-agent/pull/4498))
- **Google model operational guidance** — Gemini and Gemma models get provider-specific prompting guidance ([#4641](https://github.com/NousResearch/hermes-agent/pull/4641))
- **Anthropic long-context tier 429 handling** — automatically reduces context to 200k when hitting tier limits ([#4747](https://github.com/NousResearch/hermes-agent/pull/4747))
- **URL-based auth for third-party Anthropic endpoints** + CI test fixes ([#4148](https://github.com/NousResearch/hermes-agent/pull/4148))
- **Bearer auth for MiniMax Anthropic endpoints** ([#4028](https://github.com/NousResearch/hermes-agent/pull/4028))
- **Fireworks context length detection** ([#4158](https://github.com/NousResearch/hermes-agent/pull/4158))
- **Standard DashScope international endpoint** for Alibaba provider ([#4133](https://github.com/NousResearch/hermes-agent/pull/4133), closes [#3912](https://github.com/NousResearch/hermes-agent/issues/3912))
- **Custom providers context_length** honored in hygiene compression ([#4085](https://github.com/NousResearch/hermes-agent/pull/4085))
- **Non-sk-ant keys** treated as regular API keys, not OAuth tokens ([#4093](https://github.com/NousResearch/hermes-agent/pull/4093))
- **Claude-sonnet-4.6** added to OpenRouter and Nous model lists ([#4157](https://github.com/NousResearch/hermes-agent/pull/4157))
- **Qwen 3.6 Plus Preview** added to model lists ([#4376](https://github.com/NousResearch/hermes-agent/pull/4376))
- **MiniMax M2.7** added to hermes model picker and OpenCode ([#4208](https://github.com/NousResearch/hermes-agent/pull/4208))
- **Auto-detect models from server probe** in custom endpoint setup ([#4218](https://github.com/NousResearch/hermes-agent/pull/4218))
- **Config.yaml single source of truth** for endpoint URLs — no more env var vs config.yaml conflicts; a hedged sketch follows this list ([#4165](https://github.com/NousResearch/hermes-agent/pull/4165))
- **Setup wizard no longer overwrites** custom endpoint config ([#4180](https://github.com/NousResearch/hermes-agent/pull/4180), closes [#4172](https://github.com/NousResearch/hermes-agent/issues/4172))
- **Unified setup wizard provider selection** with `hermes model` — single code path for both flows ([#4200](https://github.com/NousResearch/hermes-agent/pull/4200))
- **Root-level provider config** no longer overrides `model.provider` ([#4329](https://github.com/NousResearch/hermes-agent/pull/4329))
- **Rate-limit pairing rejection messages** to prevent spam ([#4081](https://github.com/NousResearch/hermes-agent/pull/4081))
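The "config.yaml single source of truth" change above means a custom endpoint can be described entirely in config.yaml rather than split across env vars. A rough sketch follows; the key names here are illustrative assumptions, not the documented schema.

```yaml
# Hypothetical custom-endpoint excerpt; key names are illustrative only.
model:
  provider: custom
  default: my-local-model         # models can be auto-detected via the server probe (#4218)
custom:
  base_url: http://localhost:8000/v1
  api_key: sk-local-placeholder   # saved to the model config for custom endpoints (#4202)
```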
### Agent Loop & Conversation

- **Preserve Anthropic thinking block signatures** across tool-use turns ([#4626](https://github.com/NousResearch/hermes-agent/pull/4626))
- **Classify think-only empty responses** before retrying — prevents infinite retry loops on models that produce thinking blocks without content ([#4645](https://github.com/NousResearch/hermes-agent/pull/4645))
- **Prevent compression death spiral** from API disconnects — stops the loop where compression triggers, fails, compresses again ([#4750](https://github.com/NousResearch/hermes-agent/pull/4750), closes [#2153](https://github.com/NousResearch/hermes-agent/issues/2153))
- **Persist compressed context** to gateway session after mid-run compression ([#4095](https://github.com/NousResearch/hermes-agent/pull/4095))
- **Context-exceeded error messages** now include actionable guidance ([#4155](https://github.com/NousResearch/hermes-agent/pull/4155), closes [#4061](https://github.com/NousResearch/hermes-agent/issues/4061))
- **Strip orphaned think/reasoning tags** from user-facing responses ([#4311](https://github.com/NousResearch/hermes-agent/pull/4311), closes [#4285](https://github.com/NousResearch/hermes-agent/issues/4285))
- **Harden Codex responses preflight** and stream error handling ([#4313](https://github.com/NousResearch/hermes-agent/pull/4313))
- **Deterministic call_id fallbacks** instead of random UUIDs for prompt cache consistency ([#3991](https://github.com/NousResearch/hermes-agent/pull/3991))
- **Context pressure warning spam** prevented after compression ([#4012](https://github.com/NousResearch/hermes-agent/pull/4012))
- **AsyncOpenAI created lazily** in trajectory compressor to avoid closed event loop errors ([#4013](https://github.com/NousResearch/hermes-agent/pull/4013))

### Memory & Sessions

- **Pluggable memory provider interface** — ABC-based plugin system for custom memory backends with profile isolation; see the sketch after this list ([#4623](https://github.com/NousResearch/hermes-agent/pull/4623))
- **Honcho full integration parity** restored as reference memory provider plugin ([#4355](https://github.com/NousResearch/hermes-agent/pull/4355)) — @erosika
- **Honcho profile-scoped** host and peer resolution ([#4616](https://github.com/NousResearch/hermes-agent/pull/4616))
- **Memory flush state persisted** to prevent redundant re-flushes on gateway restart ([#4481](https://github.com/NousResearch/hermes-agent/pull/4481))
- **Memory provider tools** routed through sequential execution path ([#4803](https://github.com/NousResearch/hermes-agent/pull/4803))
- **Honcho config** written to instance-local path for profile isolation ([#4037](https://github.com/NousResearch/hermes-agent/pull/4037))
- **API server sessions** persist to shared SessionDB ([#4802](https://github.com/NousResearch/hermes-agent/pull/4802))
- **Token usage persisted** for non-CLI sessions ([#4627](https://github.com/NousResearch/hermes-agent/pull/4627))
- **Quote dotted terms in FTS5 queries** — fixes session search for terms containing dots ([#4549](https://github.com/NousResearch/hermes-agent/pull/4549))
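As a rough illustration of the pluggable memory interface, selecting a backend might look like the sketch below. The provider ABC and the Honcho reference plugin are described above; every key name and value shown here is an assumption rather than the documented schema.

```yaml
# Hypothetical memory-provider selection; key names are assumptions.
memory:
  enabled: true
  provider: honcho           # omit to stay on the built-in default provider
  honcho:
    workspace: hermes-main   # Honcho host/peer resolution is profile-scoped per #4616
```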
---

## 📱 Messaging Platforms (Gateway)

### Gateway Core

- **Race condition fixes** — photo media loss, flood control, stuck sessions, and STT config issues resolved in one hardening pass ([#4727](https://github.com/NousResearch/hermes-agent/pull/4727))
- **Approval routing through running-agent guard** — `/approve` and `/deny` now route correctly when the agent is blocked waiting for approval instead of being swallowed as interrupts ([#4798](https://github.com/NousResearch/hermes-agent/pull/4798), [#4557](https://github.com/NousResearch/hermes-agent/pull/4557), closes [#4542](https://github.com/NousResearch/hermes-agent/issues/4542))
- **Resume agent after /approve** — tool result is no longer lost when executing blocked commands ([#4418](https://github.com/NousResearch/hermes-agent/pull/4418))
- **DM thread sessions seeded** with parent transcript to preserve context ([#4559](https://github.com/NousResearch/hermes-agent/pull/4559))
- **Skill-aware slash commands** — gateway dynamically registers installed skills as slash commands with paginated `/commands` list and Telegram 100-command cap ([#3934](https://github.com/NousResearch/hermes-agent/pull/3934), [#4005](https://github.com/NousResearch/hermes-agent/pull/4005), [#4006](https://github.com/NousResearch/hermes-agent/pull/4006), [#4010](https://github.com/NousResearch/hermes-agent/pull/4010), [#4023](https://github.com/NousResearch/hermes-agent/pull/4023))
- **Per-platform disabled skills** respected in Telegram menu and gateway dispatch ([#4799](https://github.com/NousResearch/hermes-agent/pull/4799))
- **Remove user-facing compression warnings** — cleaner message flow ([#4139](https://github.com/NousResearch/hermes-agent/pull/4139))
- **`-v/-q` flags wired to stderr logging** for gateway service ([#4474](https://github.com/NousResearch/hermes-agent/pull/4474))
- **HERMES_HOME remapped** to target user in system service unit ([#4456](https://github.com/NousResearch/hermes-agent/pull/4456))
- **Honor default for invalid bool-like config values** ([#4029](https://github.com/NousResearch/hermes-agent/pull/4029))
- **setsid instead of systemd-run** for `/update` command to avoid systemd permission issues ([#4104](https://github.com/NousResearch/hermes-agent/pull/4104), closes [#4017](https://github.com/NousResearch/hermes-agent/issues/4017))
- **'Initializing agent...'** shown on first message for better UX ([#4086](https://github.com/NousResearch/hermes-agent/pull/4086))
- **Allow running gateway service as root** for LXC/container environments ([#4732](https://github.com/NousResearch/hermes-agent/pull/4732))

### Telegram

- **32-char limit on command names** with collision avoidance ([#4211](https://github.com/NousResearch/hermes-agent/pull/4211))
- **Priority order enforced** in menu — core > plugins > skills ([#4023](https://github.com/NousResearch/hermes-agent/pull/4023))
- **Capped at 50 commands** — API rejects above ~60 ([#4006](https://github.com/NousResearch/hermes-agent/pull/4006))
- **Skip empty/whitespace text** to prevent 400 errors ([#4388](https://github.com/NousResearch/hermes-agent/pull/4388))
- **E2E gateway tests** added ([#4497](https://github.com/NousResearch/hermes-agent/pull/4497)) — @pefontana

### Discord

- **Button-based approval UI** — register `/approve` and `/deny` slash commands with interactive button prompts ([#4800](https://github.com/NousResearch/hermes-agent/pull/4800))
- **Configurable reactions** — `discord.reactions` config option to disable message processing reactions ([#4199](https://github.com/NousResearch/hermes-agent/pull/4199))
- **Skip reactions and auto-threading** for unauthorized users ([#4387](https://github.com/NousResearch/hermes-agent/pull/4387))

### Slack

- **Reply in thread** — `slack.reply_in_thread` config option for threaded responses ([#4643](https://github.com/NousResearch/hermes-agent/pull/4643), closes [#2662](https://github.com/NousResearch/hermes-agent/issues/2662))
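The platform options above (`discord.reactions`, `slack.reply_in_thread`) are plain config keys. A sketch of how they might sit together in config.yaml follows; the option names come from the entries above, while the surrounding structure is an assumption.

```yaml
# Option names are taken from #4199 and #4643; the nesting shown is an assumption.
discord:
  reactions: false        # disable message-processing reactions
slack:
  reply_in_thread: true   # reply in the originating thread instead of the channel
```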
### WhatsApp

- **Enforce require_mention in group chats** ([#4730](https://github.com/NousResearch/hermes-agent/pull/4730))

### Webhook

- **Platform support fixes** — skip home channel prompt, disable tool progress for webhook adapters ([#4660](https://github.com/NousResearch/hermes-agent/pull/4660))

### Matrix

- **E2EE decryption hardening** — request missing keys, auto-trust devices, retry buffered events ([#4083](https://github.com/NousResearch/hermes-agent/pull/4083))

---
## 🖥️ CLI & User Experience

### New Slash Commands

- **`/yolo`** — toggle dangerous command approvals on/off for the session ([#3990](https://github.com/NousResearch/hermes-agent/pull/3990))
- **`/btw`** — ephemeral side questions that don't affect the main conversation context ([#4161](https://github.com/NousResearch/hermes-agent/pull/4161))
- **`/profile`** — show active profile info without leaving the chat session ([#4027](https://github.com/NousResearch/hermes-agent/pull/4027))

### Interactive CLI

- **Inline diff previews** for write and patch operations in the tool activity feed ([#4411](https://github.com/NousResearch/hermes-agent/pull/4411), [#4423](https://github.com/NousResearch/hermes-agent/pull/4423))
- **TUI pinned to bottom** on startup — no more large blank spaces between response and input ([#4412](https://github.com/NousResearch/hermes-agent/pull/4412), [#4359](https://github.com/NousResearch/hermes-agent/pull/4359), closes [#4398](https://github.com/NousResearch/hermes-agent/issues/4398), [#4421](https://github.com/NousResearch/hermes-agent/issues/4421))
- **`/history` and `/resume`** now surface recent sessions directly instead of requiring search ([#4728](https://github.com/NousResearch/hermes-agent/pull/4728))
- **Cache tokens shown** in `/insights` overview so total adds up ([#4428](https://github.com/NousResearch/hermes-agent/pull/4428))
- **`--max-turns` CLI flag** for `hermes chat` to limit agent iterations ([#4314](https://github.com/NousResearch/hermes-agent/pull/4314))
- **Detect dragged file paths** instead of treating them as slash commands ([#4533](https://github.com/NousResearch/hermes-agent/pull/4533)) — @rolme
- **Allow empty strings and falsy values** in `config set` ([#4310](https://github.com/NousResearch/hermes-agent/pull/4310), closes [#4277](https://github.com/NousResearch/hermes-agent/issues/4277))
- **Voice mode in WSL** when PulseAudio bridge is configured ([#4317](https://github.com/NousResearch/hermes-agent/pull/4317))
- **Respect `NO_COLOR` env var** and `TERM=dumb` for accessibility ([#4079](https://github.com/NousResearch/hermes-agent/pull/4079), closes [#4066](https://github.com/NousResearch/hermes-agent/issues/4066)) — @SHL0MS
- **Correct shell reload instruction** for macOS/zsh users ([#4025](https://github.com/NousResearch/hermes-agent/pull/4025))
- **Zero exit code** on successful quiet mode queries ([#4613](https://github.com/NousResearch/hermes-agent/pull/4613), closes [#4601](https://github.com/NousResearch/hermes-agent/issues/4601)) — @devorun
- **on_session_end hook fires** on interrupted exits ([#4159](https://github.com/NousResearch/hermes-agent/pull/4159))
- **Profile list display** reads `model.default` key correctly ([#4160](https://github.com/NousResearch/hermes-agent/pull/4160))
- **Browser and TTS** shown in reconfigure menu ([#4041](https://github.com/NousResearch/hermes-agent/pull/4041))
- **Web backend priority** detection simplified ([#4036](https://github.com/NousResearch/hermes-agent/pull/4036))

### Setup & Configuration

- **Allowed_users preserved** during setup, and unconfigured-provider warnings quieted ([#4551](https://github.com/NousResearch/hermes-agent/pull/4551)) — @kshitijk4poor
- **Save API key to model config** for custom endpoints ([#4202](https://github.com/NousResearch/hermes-agent/pull/4202), closes [#4182](https://github.com/NousResearch/hermes-agent/issues/4182))
- **Claude Code credentials gated** behind explicit Hermes config in wizard trigger ([#4210](https://github.com/NousResearch/hermes-agent/pull/4210))
- **Atomic writes in save_config_value** to prevent config loss on interrupt ([#4298](https://github.com/NousResearch/hermes-agent/pull/4298), [#4320](https://github.com/NousResearch/hermes-agent/pull/4320))
- **Scopes field written** to Claude Code credentials on token refresh ([#4126](https://github.com/NousResearch/hermes-agent/pull/4126))

### Update System

- **Fork detection and upstream sync** in `hermes update` ([#4744](https://github.com/NousResearch/hermes-agent/pull/4744))
- **Preserve working optional extras** when one extra fails during update ([#4550](https://github.com/NousResearch/hermes-agent/pull/4550))
- **Handle conflicted git index** during hermes update ([#4735](https://github.com/NousResearch/hermes-agent/pull/4735))
- **Avoid launchd restart race** on macOS ([#4736](https://github.com/NousResearch/hermes-agent/pull/4736))
- **Missing subprocess.run() timeouts** added to doctor and status commands ([#4009](https://github.com/NousResearch/hermes-agent/pull/4009))

---
## 🔧 Tool System

### Browser

- **Camofox anti-detection browser backend** — local stealth browsing with auto-install via `hermes tools` ([#4008](https://github.com/NousResearch/hermes-agent/pull/4008))
- **Persistent Camofox sessions** with VNC URL discovery for visual debugging ([#4419](https://github.com/NousResearch/hermes-agent/pull/4419))
- **Skip SSRF check for local backends** (Camofox, headless Chromium) ([#4292](https://github.com/NousResearch/hermes-agent/pull/4292))
- **Configurable SSRF check** via `browser.allow_private_urls` ([#4198](https://github.com/NousResearch/hermes-agent/pull/4198)) — @nils010485
- **CAMOFOX_PORT=9377** added to Docker commands ([#4340](https://github.com/NousResearch/hermes-agent/pull/4340))
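A sketch of how the Camofox backend and the SSRF toggle might be configured is shown below. `browser.allow_private_urls` and the 9377 port default are taken from the entries above; the backend key and port nesting are assumptions.

```yaml
# Hypothetical browser excerpt: only allow_private_urls (#4198) and the 9377
# default (#4340) are documented above; the other keys are assumptions.
browser:
  backend: camofox
  allow_private_urls: true   # opt out of the SSRF check for local/private targets
  camofox:
    port: 9377
```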
### File Operations

- **Inline diff previews** on write and patch actions ([#4411](https://github.com/NousResearch/hermes-agent/pull/4411), [#4423](https://github.com/NousResearch/hermes-agent/pull/4423))
- **Stale file detection** on write and patch — warns when file was modified externally since last read ([#4345](https://github.com/NousResearch/hermes-agent/pull/4345))
- **Staleness timestamp refreshed** after writes ([#4390](https://github.com/NousResearch/hermes-agent/pull/4390))
- **Size guard, dedup, and device blocking** on read_file ([#4315](https://github.com/NousResearch/hermes-agent/pull/4315))

### MCP

- **Stability fix pack** — reload timeout, shutdown cleanup, event loop handler, OAuth non-blocking ([#4757](https://github.com/NousResearch/hermes-agent/pull/4757), closes [#4462](https://github.com/NousResearch/hermes-agent/issues/4462), [#2537](https://github.com/NousResearch/hermes-agent/issues/2537))

### ACP (Editor Integration)

- **Client-provided MCP servers** registered as agent tools — editors pass their MCP servers to Hermes ([#4705](https://github.com/NousResearch/hermes-agent/pull/4705))

### Skills System

- **Size limits for agent writes** and **fuzzy matching for skill patch** — prevents oversized skill writes and improves edit reliability ([#4414](https://github.com/NousResearch/hermes-agent/pull/4414))
- **Validate hub bundle paths** before install — blocks path traversal in skill bundles ([#3986](https://github.com/NousResearch/hermes-agent/pull/3986))
- **Unified hermes-agent and hermes-agent-setup** into single skill ([#4332](https://github.com/NousResearch/hermes-agent/pull/4332))
- **Skill metadata type check** in extract_skill_conditions ([#4479](https://github.com/NousResearch/hermes-agent/pull/4479))

### New/Updated Skills

- **research-paper-writing** — full end-to-end research pipeline (replaced ml-paper-writing) ([#4654](https://github.com/NousResearch/hermes-agent/pull/4654)) — @SHL0MS
- **ascii-video** — text readability techniques and external layout oracle ([#4054](https://github.com/NousResearch/hermes-agent/pull/4054)) — @SHL0MS
- **youtube-transcript** updated for youtube-transcript-api v1.x ([#4455](https://github.com/NousResearch/hermes-agent/pull/4455)) — @el-analista
- **Skills browse and search page** added to documentation site ([#4500](https://github.com/NousResearch/hermes-agent/pull/4500)) — @IAvecilla

---

## 🔒 Security & Reliability

### Security Hardening

- **Block secret exfiltration** via browser URLs and LLM responses — scans for secret patterns in URL encoding, base64, and prompt injection vectors ([#4483](https://github.com/NousResearch/hermes-agent/pull/4483))
- **Redact secrets from execute_code sandbox output** ([#4360](https://github.com/NousResearch/hermes-agent/pull/4360))
- **Protect `.docker`, `.azure`, `.config/gh` credential directories** from read/write via file tools and terminal ([#4305](https://github.com/NousResearch/hermes-agent/pull/4305), [#4327](https://github.com/NousResearch/hermes-agent/pull/4327)) — @memosr
- **GitHub OAuth token patterns** added to redaction + snapshot redact flag ([#4295](https://github.com/NousResearch/hermes-agent/pull/4295))
- **Reject private and loopback IPs** in Telegram DoH fallback ([#4129](https://github.com/NousResearch/hermes-agent/pull/4129))
- **Reject path traversal** in credential file registration ([#4316](https://github.com/NousResearch/hermes-agent/pull/4316))
- **Validate tar archive member paths** on profile import — blocks zip-slip attacks ([#4318](https://github.com/NousResearch/hermes-agent/pull/4318))
- **Exclude auth.json and .env** from profile exports ([#4475](https://github.com/NousResearch/hermes-agent/pull/4475))

### Reliability

- **Prevent compression death spiral** from API disconnects ([#4750](https://github.com/NousResearch/hermes-agent/pull/4750), closes [#2153](https://github.com/NousResearch/hermes-agent/issues/2153))
- **Handle `is_closed` as method** in OpenAI SDK — prevents false positive client closure detection ([#4416](https://github.com/NousResearch/hermes-agent/pull/4416), closes [#4377](https://github.com/NousResearch/hermes-agent/issues/4377))
- **Exclude matrix from [all] extras** — python-olm is upstream-broken, prevents install failures ([#4615](https://github.com/NousResearch/hermes-agent/pull/4615), closes [#4178](https://github.com/NousResearch/hermes-agent/issues/4178))
- **OpenCode model routing** repaired ([#4508](https://github.com/NousResearch/hermes-agent/pull/4508))
- **Docker container image** optimized ([#4034](https://github.com/NousResearch/hermes-agent/pull/4034)) — @bcross

### Windows & Cross-Platform

- **Voice mode in WSL** with PulseAudio bridge ([#4317](https://github.com/NousResearch/hermes-agent/pull/4317))
- **Homebrew packaging** preparation ([#4099](https://github.com/NousResearch/hermes-agent/pull/4099))
- **CI fork conditionals** to prevent workflow failures on forks ([#4107](https://github.com/NousResearch/hermes-agent/pull/4107))

---
## 🐛 Notable Bug Fixes

- **Gateway approval blocked agent thread** — approval now blocks the agent thread like CLI does, preventing tool result loss ([#4557](https://github.com/NousResearch/hermes-agent/pull/4557), closes [#4542](https://github.com/NousResearch/hermes-agent/issues/4542))
- **Compression death spiral** from API disconnects — detected and halted instead of looping ([#4750](https://github.com/NousResearch/hermes-agent/pull/4750), closes [#2153](https://github.com/NousResearch/hermes-agent/issues/2153))
- **Anthropic thinking blocks lost** across tool-use turns ([#4626](https://github.com/NousResearch/hermes-agent/pull/4626))
- **Profile model config ignored** with `-p` flag — model.model now promoted to model.default correctly ([#4160](https://github.com/NousResearch/hermes-agent/pull/4160), closes [#4486](https://github.com/NousResearch/hermes-agent/issues/4486))
- **CLI blank space** between response and input area ([#4412](https://github.com/NousResearch/hermes-agent/pull/4412), [#4359](https://github.com/NousResearch/hermes-agent/pull/4359), closes [#4398](https://github.com/NousResearch/hermes-agent/issues/4398))
- **Dragged file paths** treated as slash commands instead of file references ([#4533](https://github.com/NousResearch/hermes-agent/pull/4533)) — @rolme
- **Orphaned `</think>` tags** leaking into user-facing responses ([#4311](https://github.com/NousResearch/hermes-agent/pull/4311), closes [#4285](https://github.com/NousResearch/hermes-agent/issues/4285))
- **OpenAI SDK `is_closed`** is a method not property — false positive client closure ([#4416](https://github.com/NousResearch/hermes-agent/pull/4416), closes [#4377](https://github.com/NousResearch/hermes-agent/issues/4377))
- **MCP OAuth server** could block Hermes startup instead of degrading gracefully ([#4757](https://github.com/NousResearch/hermes-agent/pull/4757), closes [#4462](https://github.com/NousResearch/hermes-agent/issues/4462))
- **MCP event loop closed** on shutdown with HTTP servers ([#4757](https://github.com/NousResearch/hermes-agent/pull/4757), closes [#2537](https://github.com/NousResearch/hermes-agent/issues/2537))
- **Alibaba provider** hardcoded to wrong endpoint ([#4133](https://github.com/NousResearch/hermes-agent/pull/4133), closes [#3912](https://github.com/NousResearch/hermes-agent/issues/3912))
- **Slack reply_in_thread** missing config option ([#4643](https://github.com/NousResearch/hermes-agent/pull/4643), closes [#2662](https://github.com/NousResearch/hermes-agent/issues/2662))
- **Quiet mode exit code** — successful `-q` queries no longer exit nonzero ([#4613](https://github.com/NousResearch/hermes-agent/pull/4613), closes [#4601](https://github.com/NousResearch/hermes-agent/issues/4601))
- **Mobile sidebar** shows only close button due to backdrop-filter issue in docs site ([#4207](https://github.com/NousResearch/hermes-agent/pull/4207)) — @xsmyile
- **Config restore reverted** by stale-branch squash merge — `_config_version` fixed ([#4440](https://github.com/NousResearch/hermes-agent/pull/4440))

---

## 🧪 Testing

- **Telegram gateway E2E tests** — full integration test suite for the Telegram adapter ([#4497](https://github.com/NousResearch/hermes-agent/pull/4497)) — @pefontana
- **11 real test failures fixed** plus sys.modules cascade poisoner resolved ([#4570](https://github.com/NousResearch/hermes-agent/pull/4570))
- **7 CI failures resolved** across hooks, plugins, and skill tests ([#3936](https://github.com/NousResearch/hermes-agent/pull/3936))
- **Codex 401 refresh tests** updated for CI compatibility ([#4166](https://github.com/NousResearch/hermes-agent/pull/4166))
- **Stale OPENAI_BASE_URL test** fixed ([#4217](https://github.com/NousResearch/hermes-agent/pull/4217))

---

## 📚 Documentation

- **Comprehensive documentation audit** — 9 HIGH and 20+ MEDIUM gaps fixed across 21 files ([#4087](https://github.com/NousResearch/hermes-agent/pull/4087))
- **Site navigation restructured** — features and platforms promoted to top-level ([#4116](https://github.com/NousResearch/hermes-agent/pull/4116))
- **Tool progress streaming** documented for API server and Open WebUI ([#4138](https://github.com/NousResearch/hermes-agent/pull/4138))
- **Telegram webhook mode** documentation ([#4089](https://github.com/NousResearch/hermes-agent/pull/4089))
- **Local LLM provider guides** — comprehensive setup guides with context length warnings ([#4294](https://github.com/NousResearch/hermes-agent/pull/4294))
- **WhatsApp allowlist behavior** clarified with `WHATSAPP_ALLOW_ALL_USERS` documentation ([#4293](https://github.com/NousResearch/hermes-agent/pull/4293))
- **Slack configuration options** — new config section in Slack docs ([#4644](https://github.com/NousResearch/hermes-agent/pull/4644))
- **Terminal backends section** expanded + docs build fixes ([#4016](https://github.com/NousResearch/hermes-agent/pull/4016))
- **Adding-providers guide** updated for unified setup flow ([#4201](https://github.com/NousResearch/hermes-agent/pull/4201))
- **ACP Zed config** fixed ([#4743](https://github.com/NousResearch/hermes-agent/pull/4743))
- **Community FAQ** entries for common workflows and troubleshooting ([#4797](https://github.com/NousResearch/hermes-agent/pull/4797))
- **Skills browse and search page** on docs site ([#4500](https://github.com/NousResearch/hermes-agent/pull/4500)) — @IAvecilla

---
## 👥 Contributors

### Core

- **@teknium1** — 135 commits across all subsystems

### Top Community Contributors

- **@kshitijk4poor** — 13 commits: preserve allowed_users during setup ([#4551](https://github.com/NousResearch/hermes-agent/pull/4551)), and various fixes
- **@erosika** — 12 commits: Honcho full integration parity restored as memory provider plugin ([#4355](https://github.com/NousResearch/hermes-agent/pull/4355))
- **@pefontana** — 9 commits: Telegram gateway E2E test suite ([#4497](https://github.com/NousResearch/hermes-agent/pull/4497))
- **@bcross** — 5 commits: Docker container image optimization ([#4034](https://github.com/NousResearch/hermes-agent/pull/4034))
- **@SHL0MS** — 4 commits: NO_COLOR/TERM=dumb support ([#4079](https://github.com/NousResearch/hermes-agent/pull/4079)), ascii-video skill updates ([#4054](https://github.com/NousResearch/hermes-agent/pull/4054)), research-paper-writing skill ([#4654](https://github.com/NousResearch/hermes-agent/pull/4654))

### All Contributors

@0xbyt4, @arasovic, @Bartok9, @bcross, @binhnt92, @camden-lowrance, @curtitoo, @Dakota, @Dave Tist, @Dean Kerr, @devorun, @dieutx, @Dilee, @el-analista, @erosika, @Gutslabs, @IAvecilla, @Jack, @Johannnnn506, @kshitijk4poor, @Laura Batalha, @Leegenux, @Lume, @MacroAnarchy, @maymuneth, @memosr, @NexVeridian, @Nick, @nils010485, @pefontana, @Penov, @rolme, @SHL0MS, @txchen, @xsmyile

### Issues Resolved from Community

@acsezen ([#2537](https://github.com/NousResearch/hermes-agent/issues/2537)), @arasovic ([#4285](https://github.com/NousResearch/hermes-agent/issues/4285)), @camden-lowrance ([#4462](https://github.com/NousResearch/hermes-agent/issues/4462)), @devorun ([#4601](https://github.com/NousResearch/hermes-agent/issues/4601)), @eloklam ([#4486](https://github.com/NousResearch/hermes-agent/issues/4486)), @HenkDz ([#3719](https://github.com/NousResearch/hermes-agent/issues/3719)), @hypotyposis ([#2153](https://github.com/NousResearch/hermes-agent/issues/2153)), @kazamak ([#4178](https://github.com/NousResearch/hermes-agent/issues/4178)), @lstep ([#4366](https://github.com/NousResearch/hermes-agent/issues/4366)), @Mark-Lok ([#4542](https://github.com/NousResearch/hermes-agent/issues/4542)), @NoJster ([#4421](https://github.com/NousResearch/hermes-agent/issues/4421)), @patp ([#2662](https://github.com/NousResearch/hermes-agent/issues/2662)), @pr0n ([#4601](https://github.com/NousResearch/hermes-agent/issues/4601)), @saulmc ([#4377](https://github.com/NousResearch/hermes-agent/issues/4377)), @SHL0MS ([#4060](https://github.com/NousResearch/hermes-agent/issues/4060), [#4061](https://github.com/NousResearch/hermes-agent/issues/4061), [#4066](https://github.com/NousResearch/hermes-agent/issues/4066), [#4172](https://github.com/NousResearch/hermes-agent/issues/4172), [#4277](https://github.com/NousResearch/hermes-agent/issues/4277)), @Z-Mackintosh ([#4398](https://github.com/NousResearch/hermes-agent/issues/4398))

---

**Full Changelog**: [v2026.3.30...v2026.4.3](https://github.com/NousResearch/hermes-agent/compare/v2026.3.30...v2026.4.3)

@@ -1,346 +0,0 @@

# Hermes Agent v0.8.0 (v2026.4.8)

**Release Date:** April 8, 2026

> The intelligence release — background task auto-notifications, free MiMo v2 Pro on Nous Portal, live model switching across all platforms, self-optimized GPT/Codex guidance, native Google AI Studio, smart inactivity timeouts, approval buttons, MCP OAuth 2.1, and 209 merged PRs with 82 resolved issues.

---

## ✨ Highlights

- **Background Process Auto-Notifications (`notify_on_complete`)** — Background tasks can now automatically notify the agent when they finish. Start a long-running process (AI model training, test suites, deployments, builds) and the agent gets notified on completion — no polling needed. The agent can keep working on other things and pick up results when they land. ([#5779](https://github.com/NousResearch/hermes-agent/pull/5779))

- **Free Xiaomi MiMo v2 Pro on Nous Portal** — Nous Portal now supports the free-tier Xiaomi MiMo v2 Pro model for auxiliary tasks (compression, vision, summarization), with free-tier model gating and pricing display in model selection. ([#6018](https://github.com/NousResearch/hermes-agent/pull/6018), [#5880](https://github.com/NousResearch/hermes-agent/pull/5880))

- **Live Model Switching (`/model` Command)** — Switch models and providers mid-session from CLI, Telegram, Discord, Slack, or any gateway platform. Aggregator-aware resolution keeps you on OpenRouter/Nous when possible, with automatic cross-provider fallback when needed. Interactive model pickers on Telegram and Discord with inline buttons. ([#5181](https://github.com/NousResearch/hermes-agent/pull/5181), [#5742](https://github.com/NousResearch/hermes-agent/pull/5742))

- **Self-Optimized GPT/Codex Tool-Use Guidance** — The agent diagnosed and patched 5 failure modes in GPT and Codex tool calling through automated behavioral benchmarking, dramatically improving reliability on OpenAI models. Includes execution discipline guidance and thinking-only prefill continuation for structured reasoning. ([#6120](https://github.com/NousResearch/hermes-agent/pull/6120), [#5414](https://github.com/NousResearch/hermes-agent/pull/5414), [#5931](https://github.com/NousResearch/hermes-agent/pull/5931))

- **Google AI Studio (Gemini) Native Provider** — Direct access to Gemini models through Google's AI Studio API. Includes automatic models.dev registry integration for real-time context length detection across any provider. ([#5577](https://github.com/NousResearch/hermes-agent/pull/5577))

- **Inactivity-Based Agent Timeouts** — Gateway and cron timeouts now track actual tool activity instead of wall-clock time. Long-running tasks that are actively working will never be killed — only truly idle agents time out; a hedged sketch after this list illustrates the semantics. ([#5389](https://github.com/NousResearch/hermes-agent/pull/5389), [#5440](https://github.com/NousResearch/hermes-agent/pull/5440))

- **Approval Buttons on Slack & Telegram** — Dangerous command approval via native platform buttons instead of typing `/approve`. Slack gets thread context preservation; Telegram gets emoji reactions for approval status. ([#5890](https://github.com/NousResearch/hermes-agent/pull/5890), [#5975](https://github.com/NousResearch/hermes-agent/pull/5975))

- **MCP OAuth 2.1 PKCE + OSV Malware Scanning** — Full standards-compliant OAuth for MCP server authentication, plus automatic malware scanning of MCP extension packages via the OSV vulnerability database. ([#5420](https://github.com/NousResearch/hermes-agent/pull/5420), [#5305](https://github.com/NousResearch/hermes-agent/pull/5305))

- **Centralized Logging & Config Validation** — Structured logging to `~/.hermes/logs/` (agent.log + errors.log) with the `hermes logs` command for tailing and filtering. Config structure validation catches malformed YAML at startup before it causes cryptic failures. ([#5430](https://github.com/NousResearch/hermes-agent/pull/5430), [#5426](https://github.com/NousResearch/hermes-agent/pull/5426))

- **Plugin System Expansion** — Plugins can now register CLI subcommands, receive request-scoped API hooks with correlation IDs, prompt for required env vars during install, and hook into session lifecycle events (finalize/reset). ([#5295](https://github.com/NousResearch/hermes-agent/pull/5295), [#5427](https://github.com/NousResearch/hermes-agent/pull/5427), [#5470](https://github.com/NousResearch/hermes-agent/pull/5470), [#6129](https://github.com/NousResearch/hermes-agent/pull/6129))

- **Matrix Tier 1 & Platform Hardening** — Matrix gets reactions, read receipts, rich formatting, and room management. Discord adds channel controls and ignored channels. Signal gets full MEDIA: tag delivery. Mattermost gets file attachments. Comprehensive reliability fixes across all platforms. ([#5275](https://github.com/NousResearch/hermes-agent/pull/5275), [#5975](https://github.com/NousResearch/hermes-agent/pull/5975), [#5602](https://github.com/NousResearch/hermes-agent/pull/5602))

- **Security Hardening Pass** — Consolidated SSRF protections, timing attack mitigations, tar traversal prevention, credential leakage guards, cron path traversal hardening, and cross-session isolation. Terminal workdir sanitization across all backends. ([#5944](https://github.com/NousResearch/hermes-agent/pull/5944), [#5613](https://github.com/NousResearch/hermes-agent/pull/5613), [#5629](https://github.com/NousResearch/hermes-agent/pull/5629))
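To make the inactivity-based timeout semantics concrete, a hypothetical sketch follows: the value is read as "seconds since the last tool event", not a cap on total wall-clock runtime, so an actively working task never trips it. The key names below are assumptions, not the documented schema.

```yaml
# Hypothetical illustration of inactivity-based timeouts; key names are
# assumptions. 600 means "stop after 10 minutes with no tool activity",
# not "stop after 10 minutes of runtime".
gateway:
  agent_timeout: 600
cron:
  job_timeout: 900
```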
---
|
||||
|
||||
## 🏗️ Core Agent & Architecture
|
||||
|
||||
### Provider & Model Support
|
||||
- **Native Google AI Studio (Gemini) provider** with models.dev integration for automatic context length detection ([#5577](https://github.com/NousResearch/hermes-agent/pull/5577))
|
||||
- **`/model` command — full provider+model system overhaul** — live switching across CLI and all gateway platforms with aggregator-aware resolution ([#5181](https://github.com/NousResearch/hermes-agent/pull/5181))
|
||||
- **Interactive model picker for Telegram and Discord** — inline button-based model selection ([#5742](https://github.com/NousResearch/hermes-agent/pull/5742))
|
||||
- **Nous Portal free-tier model gating** with pricing display in model selection ([#5880](https://github.com/NousResearch/hermes-agent/pull/5880))
|
||||
- **Model pricing display** for OpenRouter and Nous Portal providers ([#5416](https://github.com/NousResearch/hermes-agent/pull/5416))
|
||||
- **xAI (Grok) prompt caching** via `x-grok-conv-id` header ([#5604](https://github.com/NousResearch/hermes-agent/pull/5604))
|
||||
- **Grok added to tool-use enforcement models** for direct xAI usage ([#5595](https://github.com/NousResearch/hermes-agent/pull/5595))
|
||||
- **MiniMax TTS provider** (speech-2.8) ([#4963](https://github.com/NousResearch/hermes-agent/pull/4963))
|
||||
- **Non-agentic model warning** — warns users when loading Hermes LLM models not designed for tool use ([#5378](https://github.com/NousResearch/hermes-agent/pull/5378))
|
||||
- **Ollama Cloud auth, /model switch persistence**, and alias tab completion ([#5269](https://github.com/NousResearch/hermes-agent/pull/5269))
|
||||
- **Preserve dots in OpenCode Go model names** (minimax-m2.7, glm-4.5, kimi-k2.5) ([#5597](https://github.com/NousResearch/hermes-agent/pull/5597))
|
||||
- **MiniMax models 404 fix** — strip /v1 from Anthropic base URL for OpenCode Go ([#4918](https://github.com/NousResearch/hermes-agent/pull/4918))
|
||||
- **Provider credential reset windows** honored in pooled failover ([#5188](https://github.com/NousResearch/hermes-agent/pull/5188))
|
||||
- **OAuth token sync** between credential pool and credentials file ([#4981](https://github.com/NousResearch/hermes-agent/pull/4981))
|
||||
- **Stale OAuth credentials** no longer block OpenRouter users on auto-detect ([#5746](https://github.com/NousResearch/hermes-agent/pull/5746))
|
||||
- **Codex OAuth credential pool disconnect** + expired token import fix ([#5681](https://github.com/NousResearch/hermes-agent/pull/5681))
|
||||
- **Codex pool entry sync** from `~/.codex/auth.json` on exhaustion — @GratefulDave ([#5610](https://github.com/NousResearch/hermes-agent/pull/5610))
|
||||
- **Auxiliary client payment fallback** — retry with next provider on 402 ([#5599](https://github.com/NousResearch/hermes-agent/pull/5599))
|
||||
- **Auxiliary client resolves named custom providers** and 'main' alias ([#5978](https://github.com/NousResearch/hermes-agent/pull/5978))
|
||||
- **Use mimo-v2-pro** for non-vision auxiliary tasks on Nous free tier ([#6018](https://github.com/NousResearch/hermes-agent/pull/6018))
|
||||
- **Vision auto-detection** tries main provider first ([#6041](https://github.com/NousResearch/hermes-agent/pull/6041))
|
||||
- **Provider re-ordering and Quick Install** — @austinpickett ([#4664](https://github.com/NousResearch/hermes-agent/pull/4664))
|
||||
- **Nous OAuth access_token** no longer used as inference API key — @SHL0MS ([#5564](https://github.com/NousResearch/hermes-agent/pull/5564))
|
||||
- **HERMES_PORTAL_BASE_URL env var** respected during Nous login — @benbarclay ([#5745](https://github.com/NousResearch/hermes-agent/pull/5745))
|
||||
- **Env var overrides** for Nous portal/inference URLs ([#5419](https://github.com/NousResearch/hermes-agent/pull/5419))
|
||||
- **Z.AI endpoint auto-detect** via probe and cache ([#5763](https://github.com/NousResearch/hermes-agent/pull/5763))
|
||||
- **MiniMax context lengths, model catalog, thinking guard, aux model, and config base_url** corrections ([#6082](https://github.com/NousResearch/hermes-agent/pull/6082))
|
||||
- **Community provider/model resolution fixes** — salvaged 4 community PRs + MiniMax aux URL ([#5983](https://github.com/NousResearch/hermes-agent/pull/5983))
|
||||
|
||||
### Agent Loop & Conversation
|
||||
- **Self-optimized GPT/Codex tool-use guidance** via automated behavioral benchmarking — agent self-diagnosed and patched 5 failure modes ([#6120](https://github.com/NousResearch/hermes-agent/pull/6120))
|
||||
- **GPT/Codex execution discipline guidance** in system prompts ([#5414](https://github.com/NousResearch/hermes-agent/pull/5414))
|
||||
- **Thinking-only prefill continuation** for structured reasoning responses ([#5931](https://github.com/NousResearch/hermes-agent/pull/5931))
|
||||
- **Accept reasoning-only responses** without retries — set content to "(empty)" instead of infinite retry ([#5278](https://github.com/NousResearch/hermes-agent/pull/5278))
|
||||
- **Jittered retry backoff** — exponential backoff with jitter for API retries ([#6048](https://github.com/NousResearch/hermes-agent/pull/6048))
|
||||
- **Smart thinking block signature management** — preserve and manage Anthropic thinking signatures across turns ([#6112](https://github.com/NousResearch/hermes-agent/pull/6112))
|
||||
- **Coerce tool call arguments** to match JSON Schema types — fixes models that send strings instead of numbers/booleans ([#5265](https://github.com/NousResearch/hermes-agent/pull/5265))
|
||||
- **Save oversized tool results to file** instead of destructive truncation ([#5210](https://github.com/NousResearch/hermes-agent/pull/5210))
|
||||
- **Sandbox-aware tool result persistence** ([#6085](https://github.com/NousResearch/hermes-agent/pull/6085))
|
||||
- **Streaming fallback** improved after edit failures ([#6110](https://github.com/NousResearch/hermes-agent/pull/6110))
|
||||
- **Codex empty-output gaps** covered in fallback + normalizer + auxiliary client ([#5724](https://github.com/NousResearch/hermes-agent/pull/5724), [#5730](https://github.com/NousResearch/hermes-agent/pull/5730), [#5734](https://github.com/NousResearch/hermes-agent/pull/5734))
|
||||
- **Codex stream output backfill** from output_item.done events ([#5689](https://github.com/NousResearch/hermes-agent/pull/5689))
|
||||
- **Stream consumer creates new message** after tool boundaries ([#5739](https://github.com/NousResearch/hermes-agent/pull/5739))
|
||||
- **Codex validation aligned** with normalization for empty stream output ([#5940](https://github.com/NousResearch/hermes-agent/pull/5940))
|
||||
- **Bridge tool-calls** in copilot-acp adapter ([#5460](https://github.com/NousResearch/hermes-agent/pull/5460))
|
||||
- **Filter transcript-only roles** from chat-completions payload ([#4880](https://github.com/NousResearch/hermes-agent/pull/4880))
|
||||
- **Context compaction failures fixed** on temperature-restricted models — @MadKangYu ([#5608](https://github.com/NousResearch/hermes-agent/pull/5608))
|
||||
- **Sanitize tool_calls for all strict APIs** (Fireworks, Mistral, etc.) — @lumethegreat ([#5183](https://github.com/NousResearch/hermes-agent/pull/5183))
|
||||
|
||||
### Memory & Sessions
|
||||
- **Supermemory memory provider** — new memory plugin with multi-container, search_mode, identity template, and env var override ([#5737](https://github.com/NousResearch/hermes-agent/pull/5737), [#5933](https://github.com/NousResearch/hermes-agent/pull/5933))
|
||||
- **Shared thread sessions** by default — multi-user thread support across gateway platforms ([#5391](https://github.com/NousResearch/hermes-agent/pull/5391))
|
||||
- **Subagent sessions linked to parent** and hidden from session list ([#5309](https://github.com/NousResearch/hermes-agent/pull/5309))
|
||||
- **Profile-scoped memory isolation** and clone support ([#4845](https://github.com/NousResearch/hermes-agent/pull/4845))
|
||||
- **Thread gateway user_id to memory plugins** for per-user scoping ([#5895](https://github.com/NousResearch/hermes-agent/pull/5895))
|
||||
- **Honcho plugin drift overhaul** + plugin CLI registration system ([#5295](https://github.com/NousResearch/hermes-agent/pull/5295))
|
||||
- **Honcho holographic prompt and trust score** rendering preserved ([#4872](https://github.com/NousResearch/hermes-agent/pull/4872))
|
||||
- **Honcho doctor fix** — use recall_mode instead of memory_mode — @techguysimon ([#5645](https://github.com/NousResearch/hermes-agent/pull/5645))
|
||||
- **RetainDB** — API routes, write queue, dialectic, agent model, file tools fixes ([#5461](https://github.com/NousResearch/hermes-agent/pull/5461))
|
||||
- **Hindsight memory plugin overhaul** + memory setup wizard fixes ([#5094](https://github.com/NousResearch/hermes-agent/pull/5094))
|
||||
- **mem0 API v2 compat**, prefetch context fencing, secret redaction ([#5423](https://github.com/NousResearch/hermes-agent/pull/5423))
|
||||
- **mem0 env vars merged** with mem0.json instead of either/or ([#4939](https://github.com/NousResearch/hermes-agent/pull/4939))
|
||||
- **Clean user message** used for all memory provider operations ([#4940](https://github.com/NousResearch/hermes-agent/pull/4940))
|
||||
- **Silent memory flush failure** on /new and /resume fixed — @ryanautomated ([#5640](https://github.com/NousResearch/hermes-agent/pull/5640))
|
||||
- **OpenViking atexit safety net** for session commit ([#5664](https://github.com/NousResearch/hermes-agent/pull/5664))
|
||||
- **OpenViking tenant-scoping headers** for multi-tenant servers ([#4936](https://github.com/NousResearch/hermes-agent/pull/4936))
|
||||
- **ByteRover brv query** runs synchronously before LLM call ([#4831](https://github.com/NousResearch/hermes-agent/pull/4831))
|
||||
|
||||
---
|
||||
|
||||
## 📱 Messaging Platforms (Gateway)
|
||||
|
||||
### Gateway Core
|
||||
- **Inactivity-based agent timeout** — replaces wall-clock timeout with smart activity tracking; long-running active tasks never killed ([#5389](https://github.com/NousResearch/hermes-agent/pull/5389))
|
||||
- **Approval buttons for Slack & Telegram** + Slack thread context preservation ([#5890](https://github.com/NousResearch/hermes-agent/pull/5890))
|
||||
- **Live-stream /update output** + forward interactive prompts to user ([#5180](https://github.com/NousResearch/hermes-agent/pull/5180))
|
||||
- **Infinite timeout support** + periodic notifications + actionable error messages ([#4959](https://github.com/NousResearch/hermes-agent/pull/4959))
|
||||
- **Duplicate message prevention** — gateway dedup + partial stream guard ([#4878](https://github.com/NousResearch/hermes-agent/pull/4878))
|
||||
- **Webhook delivery_info persistence** + full session id in /status ([#5942](https://github.com/NousResearch/hermes-agent/pull/5942))
|
||||
- **Tool preview truncation** respects tool_preview_length in all/new progress modes ([#5937](https://github.com/NousResearch/hermes-agent/pull/5937))
|
||||
- **Short preview truncation** restored for all/new tool progress modes ([#4935](https://github.com/NousResearch/hermes-agent/pull/4935))
|
||||
- **Update-pending state** written atomically to prevent corruption ([#4923](https://github.com/NousResearch/hermes-agent/pull/4923))
|
||||
- **Approval session key isolated** per turn ([#4884](https://github.com/NousResearch/hermes-agent/pull/4884))
|
||||
- **Active-session guard bypass** for /approve, /deny, /stop, /new ([#4926](https://github.com/NousResearch/hermes-agent/pull/4926), [#5765](https://github.com/NousResearch/hermes-agent/pull/5765))
|
||||
- **Typing indicator paused** during approval waits ([#5893](https://github.com/NousResearch/hermes-agent/pull/5893))
|
||||
- **Caption check** uses exact line-by-line match instead of substring (all platforms) ([#5939](https://github.com/NousResearch/hermes-agent/pull/5939))
|
||||
- **MEDIA: tags stripped** from streamed gateway messages ([#5152](https://github.com/NousResearch/hermes-agent/pull/5152))
|
||||
- **MEDIA: tags extracted** from cron delivery before sending ([#5598](https://github.com/NousResearch/hermes-agent/pull/5598))
|
||||
- **Profile-aware service units** + voice transcription cleanup ([#5972](https://github.com/NousResearch/hermes-agent/pull/5972))
|
||||
- **Thread-safe PairingStore** with atomic writes — @CharlieKerfoot ([#5656](https://github.com/NousResearch/hermes-agent/pull/5656))
|
||||
- **Sanitize media URLs** in base platform logs — @WAXLYY ([#5631](https://github.com/NousResearch/hermes-agent/pull/5631))
|
||||
- **Reduce Telegram fallback IP activation log noise** — @MadKangYu ([#5615](https://github.com/NousResearch/hermes-agent/pull/5615))
|
||||
- **Cron static method wrappers** to prevent self-binding ([#5299](https://github.com/NousResearch/hermes-agent/pull/5299))
|
||||
- **Stale 'hermes login' replaced** with 'hermes auth' + credential removal re-seeding fix ([#5670](https://github.com/NousResearch/hermes-agent/pull/5670))
|
||||
|
||||
### Telegram
|
||||
- **Group topics skill binding** for supergroup forum topics ([#4886](https://github.com/NousResearch/hermes-agent/pull/4886))
|
||||
- **Emoji reactions** for approval status and notifications ([#5975](https://github.com/NousResearch/hermes-agent/pull/5975))
|
||||
- **Duplicate message delivery prevented** on send timeout ([#5153](https://github.com/NousResearch/hermes-agent/pull/5153))
|
||||
- **Command names sanitized** to strip invalid characters ([#5596](https://github.com/NousResearch/hermes-agent/pull/5596))
|
||||
- **Per-platform disabled skills** respected in Telegram menu and gateway dispatch ([#4799](https://github.com/NousResearch/hermes-agent/pull/4799))
|
||||
- **/approve and /deny** routed through running-agent guard ([#4798](https://github.com/NousResearch/hermes-agent/pull/4798))
|
||||
|
||||
### Discord
|
||||
- **Channel controls** — ignored_channels and no_thread_channels config options ([#5975](https://github.com/NousResearch/hermes-agent/pull/5975))
|
||||
- **Skills registered as native slash commands** via shared gateway logic ([#5603](https://github.com/NousResearch/hermes-agent/pull/5603))
|
||||
- **/approve, /deny, /queue, /background, /btw** registered as native slash commands ([#4800](https://github.com/NousResearch/hermes-agent/pull/4800), [#5477](https://github.com/NousResearch/hermes-agent/pull/5477))
|
||||
- **Unnecessary members intent** removed on startup + token lock leak fix ([#5302](https://github.com/NousResearch/hermes-agent/pull/5302))
|
||||
|
||||
### Slack
|
||||
- **Thread engagement** — auto-respond in bot-started and mentioned threads ([#5897](https://github.com/NousResearch/hermes-agent/pull/5897))
|
||||
- **mrkdwn in edit_message** + thread replies without @mentions ([#5733](https://github.com/NousResearch/hermes-agent/pull/5733))
|
||||
|
||||
### Matrix
|
||||
- **Tier 1 feature parity** — reactions, read receipts, rich formatting, room management ([#5275](https://github.com/NousResearch/hermes-agent/pull/5275))
|
||||
- **MATRIX_REQUIRE_MENTION and MATRIX_AUTO_THREAD** support ([#5106](https://github.com/NousResearch/hermes-agent/pull/5106))
|
||||
- **Comprehensive reliability** — encrypted media, auth recovery, cron E2EE, Synapse compat ([#5271](https://github.com/NousResearch/hermes-agent/pull/5271))
|
||||
- **CJK input, E2EE, and reconnect** fixes ([#5665](https://github.com/NousResearch/hermes-agent/pull/5665))
|
||||
|
||||
### Signal
|
||||
- **Full MEDIA: tag delivery** — send_image_file, send_voice, and send_video implemented ([#5602](https://github.com/NousResearch/hermes-agent/pull/5602))
|
||||
|
||||
### Mattermost
|
||||
- **File attachments** — set message type to DOCUMENT when post has file attachments — @nericervin ([#5609](https://github.com/NousResearch/hermes-agent/pull/5609))
|
||||
|
||||
### Feishu
|
||||
- **Interactive card approval buttons** ([#6043](https://github.com/NousResearch/hermes-agent/pull/6043))
|
||||
- **Reconnect and ACL** fixes ([#5665](https://github.com/NousResearch/hermes-agent/pull/5665))
|
||||
|
||||
### Webhooks
|
||||
- **`{__raw__}` template token** and thread_id passthrough for forum topics ([#5662](https://github.com/NousResearch/hermes-agent/pull/5662))
|
||||
|
||||
---
|
||||
|
||||
## 🖥️ CLI & User Experience
|
||||
|
||||
### Interactive CLI
|
||||
- **Defer response content** until reasoning block completes ([#5773](https://github.com/NousResearch/hermes-agent/pull/5773))
|
||||
- **Ghost status-bar lines cleared** on terminal resize ([#4960](https://github.com/NousResearch/hermes-agent/pull/4960))
|
||||
- **Normalise \r\n and \r line endings** in pasted text ([#4849](https://github.com/NousResearch/hermes-agent/pull/4849))
|
||||
- **ChatConsole errors, curses scroll, skin-aware banner, git state** banner fixes ([#5974](https://github.com/NousResearch/hermes-agent/pull/5974))
|
||||
- **Native Windows image paste** support ([#5917](https://github.com/NousResearch/hermes-agent/pull/5917))
|
||||
- **--yolo and other flags** no longer silently dropped when placed before 'chat' subcommand ([#5145](https://github.com/NousResearch/hermes-agent/pull/5145))
|
||||
|
||||
### Setup & Configuration
|
||||
- **Config structure validation** — detect malformed YAML at startup with actionable error messages ([#5426](https://github.com/NousResearch/hermes-agent/pull/5426))
|
||||
- **Centralized logging** to `~/.hermes/logs/` — agent.log (INFO+), errors.log (WARNING+) with `hermes logs` command ([#5430](https://github.com/NousResearch/hermes-agent/pull/5430))
|
||||
- **Docs links added** to setup wizard sections ([#5283](https://github.com/NousResearch/hermes-agent/pull/5283))
|
||||
- **Doctor diagnostics** — sync provider checks, config migration, WAL and mem0 diagnostics ([#5077](https://github.com/NousResearch/hermes-agent/pull/5077))
|
||||
- **Timeout debug logging** and user-facing diagnostics improved ([#5370](https://github.com/NousResearch/hermes-agent/pull/5370))
|
||||
- **Reasoning effort unified** to config.yaml only ([#6118](https://github.com/NousResearch/hermes-agent/pull/6118))
|
||||
- **Permanent command allowlist** loaded on startup ([#5076](https://github.com/NousResearch/hermes-agent/pull/5076))
|
||||
- **`hermes auth remove`** now clears env-seeded credentials permanently ([#5285](https://github.com/NousResearch/hermes-agent/pull/5285))
|
||||
- **Bundled skills synced to all profiles** during update ([#5795](https://github.com/NousResearch/hermes-agent/pull/5795))
|
||||
- **`hermes update` no longer kills** freshly-restarted gateway service ([#5448](https://github.com/NousResearch/hermes-agent/pull/5448))
|
||||
- **Subprocess.run() timeouts** added to all gateway CLI commands ([#5424](https://github.com/NousResearch/hermes-agent/pull/5424))
|
||||
- **Actionable error message** when Codex refresh token is reused — @tymrtn ([#5612](https://github.com/NousResearch/hermes-agent/pull/5612))
|
||||
- **Google-workspace skill scripts** can now run directly — @xinbenlv ([#5624](https://github.com/NousResearch/hermes-agent/pull/5624))
|
||||
|
||||
### Cron System
|
||||
- **Inactivity-based cron timeout** — replaces wall-clock; active tasks run indefinitely ([#5440](https://github.com/NousResearch/hermes-agent/pull/5440))
|
||||
- **Pre-run script injection** for data collection and change detection ([#5082](https://github.com/NousResearch/hermes-agent/pull/5082))
|
||||
- **Delivery failure tracking** in job status ([#6042](https://github.com/NousResearch/hermes-agent/pull/6042))
|
||||
- **Delivery guidance** in cron prompts — stops send_message thrashing ([#5444](https://github.com/NousResearch/hermes-agent/pull/5444))
|
||||
- **MEDIA files delivered** as native platform attachments ([#5921](https://github.com/NousResearch/hermes-agent/pull/5921))
|
||||
- **[SILENT] suppression** works anywhere in response — @auspic7 ([#5654](https://github.com/NousResearch/hermes-agent/pull/5654))
|
||||
- **Cron path traversal** hardening ([#5147](https://github.com/NousResearch/hermes-agent/pull/5147))
|
||||
|
||||
---
|
||||
|
||||
## 🔧 Tool System
|
||||
|
||||
### Terminal & Execution
|
||||
- **Execute_code on remote backends** — code execution now works on Docker, SSH, Modal, and other remote terminal backends ([#5088](https://github.com/NousResearch/hermes-agent/pull/5088))
|
||||
- **Exit code context** for common CLI tools in terminal results — helps agent understand what went wrong ([#5144](https://github.com/NousResearch/hermes-agent/pull/5144))
|
||||
- **Progressive subdirectory hint discovery** — agent learns project structure as it navigates ([#5291](https://github.com/NousResearch/hermes-agent/pull/5291))
|
||||
- **notify_on_complete for background processes** — get notified when long-running tasks finish ([#5779](https://github.com/NousResearch/hermes-agent/pull/5779))
|
||||
- **Docker env config** — explicit container environment variables via docker_env config ([#4738](https://github.com/NousResearch/hermes-agent/pull/4738))
|
||||
- **Approval metadata included** in terminal tool results ([#5141](https://github.com/NousResearch/hermes-agent/pull/5141))
|
||||
- **Workdir parameter sanitized** in terminal tool across all backends ([#5629](https://github.com/NousResearch/hermes-agent/pull/5629))
|
||||
- **Detached process crash recovery** state corrected ([#6101](https://github.com/NousResearch/hermes-agent/pull/6101))
|
||||
- **Agent-browser paths with spaces** preserved — @Vasanthdev2004 ([#6077](https://github.com/NousResearch/hermes-agent/pull/6077))
|
||||
- **Portable base64 encoding** for image reading on macOS — @CharlieKerfoot ([#5657](https://github.com/NousResearch/hermes-agent/pull/5657))
|
||||
|
||||
### Browser
|
||||
- **Switch managed browser provider** from Browserbase to Browser Use — @benbarclay ([#5750](https://github.com/NousResearch/hermes-agent/pull/5750))
|
||||
- **Firecrawl cloud browser** provider — @alt-glitch ([#5628](https://github.com/NousResearch/hermes-agent/pull/5628))
|
||||
- **JS evaluation** via browser_console expression parameter ([#5303](https://github.com/NousResearch/hermes-agent/pull/5303))
|
||||
- **Windows browser** fixes ([#5665](https://github.com/NousResearch/hermes-agent/pull/5665))
|
||||
|
||||
### MCP
|
||||
- **MCP OAuth 2.1 PKCE** — full standards-compliant OAuth client support ([#5420](https://github.com/NousResearch/hermes-agent/pull/5420))
|
||||
- **OSV malware check** for MCP extension packages ([#5305](https://github.com/NousResearch/hermes-agent/pull/5305))
|
||||
- **Prefer structuredContent over text** + no_mcp sentinel ([#5979](https://github.com/NousResearch/hermes-agent/pull/5979))
|
||||
- **Unknown toolsets warning suppressed** for MCP server names ([#5279](https://github.com/NousResearch/hermes-agent/pull/5279))
|
||||
|
||||
### Web & Files
|
||||
- **.zip document support** + auto-mount cache dirs into remote backends ([#4846](https://github.com/NousResearch/hermes-agent/pull/4846))
|
||||
- **Redact query secrets** in send_message errors — @WAXLYY ([#5650](https://github.com/NousResearch/hermes-agent/pull/5650))
|
||||
|
||||
### Delegation
|
||||
- **Credential pool sharing** + workspace path hints for subagents ([#5748](https://github.com/NousResearch/hermes-agent/pull/5748))
|
||||
|
||||
### ACP (VS Code / Zed / JetBrains)
|
||||
- **Aggregate ACP improvements** — auth compat, protocol fixes, command ads, delegation, SSE events ([#5292](https://github.com/NousResearch/hermes-agent/pull/5292))
|
||||
|
||||
---
|
||||
|
||||
## 🧩 Skills Ecosystem
|
||||
|
||||
### Skills System
|
||||
- **Skill config interface** — skills can declare required config.yaml settings, prompted during setup, injected at load time ([#5635](https://github.com/NousResearch/hermes-agent/pull/5635))
|
||||
- **Plugin CLI registration system** — plugins register their own CLI subcommands without touching main.py ([#5295](https://github.com/NousResearch/hermes-agent/pull/5295))
|
||||
- **Request-scoped API hooks** with tool call correlation IDs for plugins ([#5427](https://github.com/NousResearch/hermes-agent/pull/5427))
|
||||
- **Session lifecycle hooks** — on_session_finalize and on_session_reset for CLI + gateway ([#6129](https://github.com/NousResearch/hermes-agent/pull/6129))
|
||||
- **Prompt for required env vars** during plugin install — @kshitijk4poor ([#5470](https://github.com/NousResearch/hermes-agent/pull/5470))
|
||||
- **Plugin name validation** — reject names that resolve to plugins root ([#5368](https://github.com/NousResearch/hermes-agent/pull/5368))
|
||||
- **pre_llm_call plugin context** moved to user message to preserve prompt cache ([#5146](https://github.com/NousResearch/hermes-agent/pull/5146))
|
||||
|
||||
### New & Updated Skills
|
||||
- **popular-web-designs** — 54 production website design systems ([#5194](https://github.com/NousResearch/hermes-agent/pull/5194))
|
||||
- **p5js creative coding** — @SHL0MS ([#5600](https://github.com/NousResearch/hermes-agent/pull/5600))
|
||||
- **manim-video** — mathematical and technical animations — @SHL0MS ([#4930](https://github.com/NousResearch/hermes-agent/pull/4930))
|
||||
- **llm-wiki** — Karpathy's LLM Wiki skill ([#5635](https://github.com/NousResearch/hermes-agent/pull/5635))
|
||||
- **gitnexus-explorer** — codebase indexing and knowledge serving ([#5208](https://github.com/NousResearch/hermes-agent/pull/5208))
|
||||
- **research-paper-writing** — AI-Scientist & GPT-Researcher patterns — @SHL0MS ([#5421](https://github.com/NousResearch/hermes-agent/pull/5421))
|
||||
- **blogwatcher** updated to JulienTant's fork ([#5759](https://github.com/NousResearch/hermes-agent/pull/5759))
|
||||
- **claude-code skill** comprehensive rewrite v2.0 + v2.2 ([#5155](https://github.com/NousResearch/hermes-agent/pull/5155), [#5158](https://github.com/NousResearch/hermes-agent/pull/5158))
|
||||
- **Code verification skills** consolidated into one ([#4854](https://github.com/NousResearch/hermes-agent/pull/4854))
|
||||
- **Manim CE reference docs** expanded — geometry, animations, LaTeX — @leotrs ([#5791](https://github.com/NousResearch/hermes-agent/pull/5791))
|
||||
- **Manim-video references** — design thinking, updaters, paper explainer, decorations, production quality — @SHL0MS ([#5588](https://github.com/NousResearch/hermes-agent/pull/5588), [#5408](https://github.com/NousResearch/hermes-agent/pull/5408))
|
||||
|
||||
---
|
||||
|
||||
## 🔒 Security & Reliability
|
||||
|
||||
### Security Hardening
|
||||
- **Consolidated security** — SSRF protections, timing attack mitigations, tar traversal prevention, credential leakage guards ([#5944](https://github.com/NousResearch/hermes-agent/pull/5944))
|
||||
- **Cross-session isolation** + cron path traversal hardening ([#5613](https://github.com/NousResearch/hermes-agent/pull/5613))
|
||||
- **Workdir parameter sanitized** in terminal tool across all backends ([#5629](https://github.com/NousResearch/hermes-agent/pull/5629))
|
||||
- **Approval 'once' session escalation** prevented + cron delivery platform validation ([#5280](https://github.com/NousResearch/hermes-agent/pull/5280))
|
||||
- **Profile-scoped Google Workspace OAuth tokens** protected ([#4910](https://github.com/NousResearch/hermes-agent/pull/4910))
|
||||
|
||||
### Reliability
|
||||
- **Aggressive worktree and branch cleanup** to prevent accumulation ([#6134](https://github.com/NousResearch/hermes-agent/pull/6134))
|
||||
- **O(n²) catastrophic backtracking** in redact regex fixed — 100x improvement on large outputs ([#4962](https://github.com/NousResearch/hermes-agent/pull/4962))
|
||||
- **Runtime stability fixes** across core, web, delegate, and browser tools ([#4843](https://github.com/NousResearch/hermes-agent/pull/4843))
|
||||
- **API server streaming fix** + conversation history support ([#5977](https://github.com/NousResearch/hermes-agent/pull/5977))
|
||||
- **OpenViking API endpoint paths** and response parsing corrected ([#5078](https://github.com/NousResearch/hermes-agent/pull/5078))
|
||||
|
||||
---
|
||||
|
||||
## 🐛 Notable Bug Fixes
|
||||
|
||||
- **9 community bugfixes salvaged** — gateway, cron, deps, macOS launchd in one batch ([#5288](https://github.com/NousResearch/hermes-agent/pull/5288))
|
||||
- **Batch core bug fixes** — model config, session reset, alias fallback, launchctl, delegation, atomic writes ([#5630](https://github.com/NousResearch/hermes-agent/pull/5630))
|
||||
- **Batch gateway/platform fixes** — matrix E2EE, CJK input, Windows browser, Feishu reconnect + ACL ([#5665](https://github.com/NousResearch/hermes-agent/pull/5665))
|
||||
- **Stale test skips removed**, regex backtracking, file search bug, and test flakiness ([#4969](https://github.com/NousResearch/hermes-agent/pull/4969))
|
||||
- **Nix flake** — read version, regen uv.lock, add hermes_logging — @alt-glitch ([#5651](https://github.com/NousResearch/hermes-agent/pull/5651))
|
||||
- **Lowercase variable redaction** regression tests ([#5185](https://github.com/NousResearch/hermes-agent/pull/5185))
|
||||
|
||||
---
|
||||
|
||||
## 🧪 Testing
|
||||
|
||||
- **57 failing CI tests repaired** across 14 files ([#5823](https://github.com/NousResearch/hermes-agent/pull/5823))
|
||||
- **Test suite re-architecture** + CI failure fixes — @alt-glitch ([#5946](https://github.com/NousResearch/hermes-agent/pull/5946))
|
||||
- **Codebase-wide lint cleanup** — unused imports, dead code, and inefficient patterns ([#5821](https://github.com/NousResearch/hermes-agent/pull/5821))
|
||||
- **browser_close tool removed** — auto-cleanup handles it ([#5792](https://github.com/NousResearch/hermes-agent/pull/5792))
|
||||
|
||||
---
|
||||
|
||||
## 📚 Documentation
|
||||
|
||||
- **Comprehensive documentation audit** — fix stale info, expand thin pages, add depth ([#5393](https://github.com/NousResearch/hermes-agent/pull/5393))
|
||||
- **40+ discrepancies fixed** between documentation and codebase ([#5818](https://github.com/NousResearch/hermes-agent/pull/5818))
|
||||
- **13 features documented** from last week's PRs ([#5815](https://github.com/NousResearch/hermes-agent/pull/5815))
|
||||
- **Guides section overhaul** — fix existing + add 3 new tutorials ([#5735](https://github.com/NousResearch/hermes-agent/pull/5735))
|
||||
- **Salvaged 4 docs PRs** — docker setup, post-update validation, local LLM guide, signal-cli install ([#5727](https://github.com/NousResearch/hermes-agent/pull/5727))
|
||||
- **Discord configuration reference** ([#5386](https://github.com/NousResearch/hermes-agent/pull/5386))
|
||||
- **Community FAQ entries** for common workflows and troubleshooting ([#4797](https://github.com/NousResearch/hermes-agent/pull/4797))
|
||||
- **WSL2 networking guide** for local model servers ([#5616](https://github.com/NousResearch/hermes-agent/pull/5616))
|
||||
- **Honcho CLI reference** + plugin CLI registration docs ([#5308](https://github.com/NousResearch/hermes-agent/pull/5308))
|
||||
- **Obsidian Headless setup** for servers in llm-wiki ([#5660](https://github.com/NousResearch/hermes-agent/pull/5660))
|
||||
- **Hermes Mod visual skin editor** added to skins page ([#6095](https://github.com/NousResearch/hermes-agent/pull/6095))
|
||||
|
||||
---
|
||||
|
||||
## 👥 Contributors
|
||||
|
||||
### Core
|
||||
- **@teknium1** — 179 PRs
|
||||
|
||||
### Top Community Contributors
|
||||
- **@SHL0MS** (7 PRs) — p5js creative coding skill, manim-video skill + 5 reference expansions, research-paper-writing, Nous OAuth fix, manim font fix
|
||||
- **@alt-glitch** (3 PRs) — Firecrawl cloud browser provider, test re-architecture + CI fixes, Nix flake fixes
|
||||
- **@benbarclay** (2 PRs) — Browser Use managed provider switch, Nous portal base URL fix
|
||||
- **@CharlieKerfoot** (2 PRs) — macOS portable base64 encoding, thread-safe PairingStore
|
||||
- **@WAXLYY** (2 PRs) — send_message secret redaction, gateway media URL sanitization
|
||||
- **@MadKangYu** (2 PRs) — Telegram log noise reduction, context compaction fix for temperature-restricted models
|
||||
|
||||
### All Contributors
|
||||
@alt-glitch, @austinpickett, @auspic7, @benbarclay, @CharlieKerfoot, @GratefulDave, @kshitijk4poor, @leotrs, @lumethegreat, @MadKangYu, @nericervin, @ryanautomated, @SHL0MS, @techguysimon, @tymrtn, @Vasanthdev2004, @WAXLYY, @xinbenlv
|
||||
|
||||
---
|
||||
|
||||
**Full Changelog**: [v2026.4.3...v2026.4.8](https://github.com/NousResearch/hermes-agent/compare/v2026.4.3...v2026.4.8)
|
||||
@@ -1,329 +0,0 @@
|
||||
# Hermes Agent v0.9.0 (v2026.4.13)
|
||||
|
||||
**Release Date:** April 13, 2026
|
||||
**Since v0.8.0:** 487 commits · 269 merged PRs · 167 resolved issues · 493 files changed · 63,281 insertions · 24 contributors
|
||||
|
||||
> The everywhere release — Hermes goes mobile with Termux/Android, adds iMessage and WeChat, ships Fast Mode for OpenAI and Anthropic, introduces background process monitoring, launches a local web dashboard for managing your agent, and delivers the deepest security hardening pass yet across 16 supported platforms.
|
||||
|
||||
---
|
||||
|
||||
## ✨ Highlights
|
||||
|
||||
- **Local Web Dashboard** — A new browser-based dashboard for managing your Hermes Agent locally. Configure settings, monitor sessions, browse skills, and manage your gateway — all from a clean web interface without touching config files or the terminal. The easiest way to get started with Hermes.
|
||||
|
||||
- **Fast Mode (`/fast`)** — Priority processing for OpenAI and Anthropic models. Toggle `/fast` to route through priority queues for significantly lower latency on supported models (GPT-5.4, Codex, Claude). Expands across all OpenAI Priority Processing models and Anthropic's fast tier. ([#6875](https://github.com/NousResearch/hermes-agent/pull/6875), [#6960](https://github.com/NousResearch/hermes-agent/pull/6960), [#7037](https://github.com/NousResearch/hermes-agent/pull/7037))
|
||||
|
||||
- **iMessage via BlueBubbles** — Full iMessage integration through BlueBubbles, bringing Hermes to Apple's messaging ecosystem. Auto-webhook registration, setup wizard integration, and crash resilience. ([#6437](https://github.com/NousResearch/hermes-agent/pull/6437), [#6460](https://github.com/NousResearch/hermes-agent/pull/6460), [#6494](https://github.com/NousResearch/hermes-agent/pull/6494))
|
||||
|
||||
- **WeChat (Weixin) & WeCom Callback Mode** — Native WeChat support via iLink Bot API and a new WeCom callback-mode adapter for self-built enterprise apps. Streaming cursor, media uploads, markdown link handling, and atomic state persistence. Hermes now covers the Chinese messaging ecosystem end-to-end. ([#7166](https://github.com/NousResearch/hermes-agent/pull/7166), [#7943](https://github.com/NousResearch/hermes-agent/pull/7943))
|
||||
|
||||
- **Termux / Android Support** — Run Hermes natively on Android via Termux. Adapted install paths, TUI optimizations for mobile screens, voice backend support, and the `/image` command work on-device. ([#6834](https://github.com/NousResearch/hermes-agent/pull/6834))
|
||||
|
||||
- **Background Process Monitoring (`watch_patterns`)** — Set patterns to watch for in background process output and get notified in real-time when they match. Monitor for errors, wait for specific events ("listening on port"), or watch build logs — all without polling. ([#7635](https://github.com/NousResearch/hermes-agent/pull/7635))
|
||||
|
||||
- **Native xAI & Xiaomi MiMo Providers** — First-class provider support for xAI (Grok) and Xiaomi MiMo, with direct API access, model catalogs, and setup wizard integration. Plus Qwen OAuth with portal request support. ([#7372](https://github.com/NousResearch/hermes-agent/pull/7372), [#7855](https://github.com/NousResearch/hermes-agent/pull/7855))
|
||||
|
||||
- **Pluggable Context Engine** — Context management is now a pluggable slot via `hermes plugins`. Swap in custom context engines that control what the agent sees each turn — filtering, summarization, or domain-specific context injection. ([#7464](https://github.com/NousResearch/hermes-agent/pull/7464))
|
||||
|
||||
- **Unified Proxy Support** — SOCKS proxy, `DISCORD_PROXY`, and system proxy auto-detection across all gateway platforms. Hermes behind corporate firewalls just works. ([#6814](https://github.com/NousResearch/hermes-agent/pull/6814))
|
||||
|
||||
- **Comprehensive Security Hardening** — Path traversal protection in checkpoint manager, shell injection neutralization in sandbox writes, SSRF redirect guards in Slack image uploads, Twilio webhook signature validation (SMS RCE fix), API server auth enforcement, git argument injection prevention, and approval button authorization. ([#7933](https://github.com/NousResearch/hermes-agent/pull/7933), [#7944](https://github.com/NousResearch/hermes-agent/pull/7944), [#7940](https://github.com/NousResearch/hermes-agent/pull/7940), [#7151](https://github.com/NousResearch/hermes-agent/pull/7151), [#7156](https://github.com/NousResearch/hermes-agent/pull/7156))
|
||||
|
||||
- **`hermes backup` & `hermes import`** — Full backup and restore of your Hermes configuration, sessions, skills, and memory. Migrate between machines or create snapshots before major changes. ([#7997](https://github.com/NousResearch/hermes-agent/pull/7997))
|
||||
|
||||
- **16 Supported Platforms** — With BlueBubbles (iMessage) and WeChat joining Telegram, Discord, Slack, WhatsApp, Signal, Matrix, Email, SMS, DingTalk, Feishu, WeCom, Mattermost, Home Assistant, and Webhooks, Hermes now runs on 16 messaging platforms out of the box.
|
||||
|
||||
- **`/debug` & `hermes debug share`** — New debugging toolkit: `/debug` slash command across all platforms for quick diagnostics, plus `hermes debug share` to upload a full debug report to a pastebin for easy sharing when troubleshooting. ([#8681](https://github.com/NousResearch/hermes-agent/pull/8681))
|
||||
|
||||
---
|
||||
|
||||
## 🏗️ Core Agent & Architecture
|
||||
|
||||
### Provider & Model Support
|
||||
- **Native xAI (Grok) provider** with direct API access and model catalog ([#7372](https://github.com/NousResearch/hermes-agent/pull/7372))
|
||||
- **Xiaomi MiMo as first-class provider** — setup wizard, model catalog, empty response recovery ([#7855](https://github.com/NousResearch/hermes-agent/pull/7855))
|
||||
- **Qwen OAuth provider** with portal request support ([#6282](https://github.com/NousResearch/hermes-agent/pull/6282))
|
||||
- **Fast Mode** — `/fast` toggle for OpenAI Priority Processing + Anthropic fast tier ([#6875](https://github.com/NousResearch/hermes-agent/pull/6875), [#6960](https://github.com/NousResearch/hermes-agent/pull/6960), [#7037](https://github.com/NousResearch/hermes-agent/pull/7037))
|
||||
- **Structured API error classification** for smart failover decisions ([#6514](https://github.com/NousResearch/hermes-agent/pull/6514))
|
||||
- **Rate limit header capture** shown in `/usage` ([#6541](https://github.com/NousResearch/hermes-agent/pull/6541))
|
||||
- **API server model name** derived from profile name ([#6857](https://github.com/NousResearch/hermes-agent/pull/6857))
|
||||
- **Custom providers** now included in `/model` listings and resolution ([#7088](https://github.com/NousResearch/hermes-agent/pull/7088))
|
||||
- **Fallback provider activation** on repeated empty responses with user-visible status ([#7505](https://github.com/NousResearch/hermes-agent/pull/7505))
|
||||
- **OpenRouter variant tags** (`:free`, `:extended`, `:fast`) preserved during model switch ([#6383](https://github.com/NousResearch/hermes-agent/pull/6383))
|
||||
- **Credential exhaustion TTL** reduced from 24 hours to 1 hour ([#6504](https://github.com/NousResearch/hermes-agent/pull/6504))
|
||||
- **OAuth credential lifecycle** hardening — stale pool keys, auth.json sync, Codex CLI race fixes ([#6874](https://github.com/NousResearch/hermes-agent/pull/6874))
|
||||
- Empty response recovery for reasoning models (MiMo, Qwen, GLM) ([#8609](https://github.com/NousResearch/hermes-agent/pull/8609))
|
||||
- MiniMax context lengths, thinking guard, endpoint corrections ([#6082](https://github.com/NousResearch/hermes-agent/pull/6082), [#7126](https://github.com/NousResearch/hermes-agent/pull/7126))
|
||||
- Z.AI endpoint auto-detect via probe and cache ([#5763](https://github.com/NousResearch/hermes-agent/pull/5763))
|
||||
|
||||
### Agent Loop & Conversation
|
||||
- **Pluggable context engine slot** via `hermes plugins` ([#7464](https://github.com/NousResearch/hermes-agent/pull/7464))
|
||||
- **Background process monitoring** — `watch_patterns` for real-time output alerts ([#7635](https://github.com/NousResearch/hermes-agent/pull/7635))
|
||||
- **Improved context compression** — higher limits, tool tracking, degradation warnings, token-budget tail protection ([#6395](https://github.com/NousResearch/hermes-agent/pull/6395), [#6453](https://github.com/NousResearch/hermes-agent/pull/6453))
|
||||
- **`/compress <focus>`** — guided compression with a focus topic ([#8017](https://github.com/NousResearch/hermes-agent/pull/8017))
|
||||
- **Tiered context pressure warnings** with gateway dedup ([#6411](https://github.com/NousResearch/hermes-agent/pull/6411))
|
||||
- **Staged inactivity warning** before timeout escalation ([#6387](https://github.com/NousResearch/hermes-agent/pull/6387))
|
||||
- **Prevent agent from stopping mid-task** — compression floor, budget overhaul, activity tracking ([#7983](https://github.com/NousResearch/hermes-agent/pull/7983))
|
||||
- **Propagate child activity to parent** during `delegate_task` ([#7295](https://github.com/NousResearch/hermes-agent/pull/7295))
|
||||
- **Truncated streaming tool call detection** before execution ([#6847](https://github.com/NousResearch/hermes-agent/pull/6847))
|
||||
- Empty response retry (3 attempts with nudge) ([#6488](https://github.com/NousResearch/hermes-agent/pull/6488))
|
||||
- Adaptive streaming backoff + cursor strip to prevent message truncation ([#7683](https://github.com/NousResearch/hermes-agent/pull/7683))
|
||||
- Compression uses live session model instead of stale persisted config ([#8258](https://github.com/NousResearch/hermes-agent/pull/8258))
|
||||
- Strip `<thought>` tags from Gemma 4 responses ([#8562](https://github.com/NousResearch/hermes-agent/pull/8562))
|
||||
- Prevent `<think>` in prose from suppressing response output ([#6968](https://github.com/NousResearch/hermes-agent/pull/6968))
|
||||
- Turn-exit diagnostic logging to agent loop ([#6549](https://github.com/NousResearch/hermes-agent/pull/6549))
|
||||
- Scope tool interrupt signal per-thread to prevent cross-session leaks ([#7930](https://github.com/NousResearch/hermes-agent/pull/7930))
|
||||
|
||||
### Memory & Sessions
|
||||
- **Hindsight memory plugin** — feature parity, setup wizard, config improvements — @nicoloboschi ([#6428](https://github.com/NousResearch/hermes-agent/pull/6428))
|
||||
- **Honcho** — opt-in `initOnSessionStart` for tools mode — @Kathie-yu ([#6995](https://github.com/NousResearch/hermes-agent/pull/6995))
|
||||
- Orphan children instead of cascade-deleting in prune/delete ([#6513](https://github.com/NousResearch/hermes-agent/pull/6513))
|
||||
- Doctor command only checks the active memory provider ([#6285](https://github.com/NousResearch/hermes-agent/pull/6285))
|
||||
|
||||
---
|
||||
|
||||
## 📱 Messaging Platforms (Gateway)
|
||||
|
||||
### New Platforms
|
||||
- **BlueBubbles (iMessage)** — full adapter with auto-webhook registration, setup wizard, and crash resilience ([#6437](https://github.com/NousResearch/hermes-agent/pull/6437), [#6460](https://github.com/NousResearch/hermes-agent/pull/6460), [#6494](https://github.com/NousResearch/hermes-agent/pull/6494), [#7107](https://github.com/NousResearch/hermes-agent/pull/7107))
|
||||
- **Weixin (WeChat)** — native support via iLink Bot API with streaming, media uploads, markdown links ([#7166](https://github.com/NousResearch/hermes-agent/pull/7166), [#8665](https://github.com/NousResearch/hermes-agent/pull/8665))
|
||||
- **WeCom Callback Mode** — self-built enterprise app adapter with atomic state persistence ([#7943](https://github.com/NousResearch/hermes-agent/pull/7943), [#7928](https://github.com/NousResearch/hermes-agent/pull/7928))
|
||||
|
||||
### Discord
|
||||
- **Allowed channels whitelist** config — @jarvis-phw ([#7044](https://github.com/NousResearch/hermes-agent/pull/7044))
|
||||
- **Forum channel topic inheritance** in thread sessions — @hermes-agent-dhabibi ([#6377](https://github.com/NousResearch/hermes-agent/pull/6377))
|
||||
- **DISCORD_REPLY_TO_MODE** setting ([#6333](https://github.com/NousResearch/hermes-agent/pull/6333))
|
||||
- Accept `.log` attachments, raise document size limit — @kira-ariaki ([#6467](https://github.com/NousResearch/hermes-agent/pull/6467))
|
||||
- Decouple readiness from slash sync ([#8016](https://github.com/NousResearch/hermes-agent/pull/8016))
|
||||
|
||||
### Slack
|
||||
- **Consolidated Slack improvements** — 7 community PRs salvaged into one ([#6809](https://github.com/NousResearch/hermes-agent/pull/6809))
|
||||
- Handle assistant thread lifecycle events ([#6433](https://github.com/NousResearch/hermes-agent/pull/6433))
|
||||
|
||||
### Matrix
|
||||
- **Migrated from matrix-nio to mautrix-python** ([#7518](https://github.com/NousResearch/hermes-agent/pull/7518))
|
||||
- SQLite crypto store replacing pickle (fixes E2EE decryption) — @alt-glitch ([#7981](https://github.com/NousResearch/hermes-agent/pull/7981))
|
||||
- Cross-signing recovery key verification for E2EE migration ([#8282](https://github.com/NousResearch/hermes-agent/pull/8282))
|
||||
- DM mention threads + group chat events for Feishu ([#7423](https://github.com/NousResearch/hermes-agent/pull/7423))
|
||||
|
||||
### Gateway Core
|
||||
- **Unified proxy support** — SOCKS, DISCORD_PROXY, multi-platform with macOS auto-detection ([#6814](https://github.com/NousResearch/hermes-agent/pull/6814))
|
||||
- **Inbound text batching** for Discord, Matrix, WeCom + adaptive delay ([#6979](https://github.com/NousResearch/hermes-agent/pull/6979))
|
||||
- **Surface natural mid-turn assistant messages** in chat platforms ([#7978](https://github.com/NousResearch/hermes-agent/pull/7978))
|
||||
- **WSL-aware gateway** with smart systemd detection ([#7510](https://github.com/NousResearch/hermes-agent/pull/7510))
|
||||
- **All missing platforms added to setup wizard** ([#7949](https://github.com/NousResearch/hermes-agent/pull/7949))
|
||||
- **Per-platform `tool_progress` overrides** ([#6348](https://github.com/NousResearch/hermes-agent/pull/6348))
|
||||
- **Configurable 'still working' notification interval** ([#8572](https://github.com/NousResearch/hermes-agent/pull/8572))
|
||||
- `/model` switch persists across messages ([#7081](https://github.com/NousResearch/hermes-agent/pull/7081))
|
||||
- `/usage` shows rate limits, cost, and token details between turns ([#7038](https://github.com/NousResearch/hermes-agent/pull/7038))
|
||||
- Drain in-flight work before restart ([#7503](https://github.com/NousResearch/hermes-agent/pull/7503))
|
||||
- Don't evict cached agent on failed runs — prevents MCP restart loop ([#7539](https://github.com/NousResearch/hermes-agent/pull/7539))
|
||||
- Replace `os.environ` session state with `contextvars` ([#7454](https://github.com/NousResearch/hermes-agent/pull/7454))
|
||||
- Derive channel directory platforms from enum instead of hardcoded list ([#7450](https://github.com/NousResearch/hermes-agent/pull/7450))
|
||||
- Validate image downloads before caching (cross-platform) ([#7125](https://github.com/NousResearch/hermes-agent/pull/7125))
|
||||
- Cross-platform webhook delivery for all platforms ([#7095](https://github.com/NousResearch/hermes-agent/pull/7095))
|
||||
- Cron Discord thread_id delivery support ([#7106](https://github.com/NousResearch/hermes-agent/pull/7106))
|
||||
- Feishu QR-based bot onboarding ([#8570](https://github.com/NousResearch/hermes-agent/pull/8570))
|
||||
- Gateway status scoped to active profile ([#7951](https://github.com/NousResearch/hermes-agent/pull/7951))
|
||||
- Prevent background process notifications from triggering false pairing requests ([#6434](https://github.com/NousResearch/hermes-agent/pull/6434))
|
||||
|
||||
---
|
||||
|
||||
## 🖥️ CLI & User Experience
|
||||
|
||||
### Interactive CLI
|
||||
- **Termux / Android support** — adapted install paths, TUI, voice, `/image` ([#6834](https://github.com/NousResearch/hermes-agent/pull/6834))
|
||||
- **Native `/model` picker modal** for provider → model selection ([#8003](https://github.com/NousResearch/hermes-agent/pull/8003))
|
||||
- **Live per-tool elapsed timer** restored in TUI spinner ([#7359](https://github.com/NousResearch/hermes-agent/pull/7359))
|
||||
- **Stacked tool progress scrollback** in TUI ([#8201](https://github.com/NousResearch/hermes-agent/pull/8201))
|
||||
- **Random tips on new session start** (CLI + gateway, 279 tips) ([#8225](https://github.com/NousResearch/hermes-agent/pull/8225), [#8237](https://github.com/NousResearch/hermes-agent/pull/8237))
|
||||
- **`hermes dump`** — copy-pasteable setup summary for debugging ([#6550](https://github.com/NousResearch/hermes-agent/pull/6550))
|
||||
- **`hermes backup` / `hermes import`** — full config backup and restore ([#7997](https://github.com/NousResearch/hermes-agent/pull/7997))
|
||||
- **WSL environment hint** in system prompt ([#8285](https://github.com/NousResearch/hermes-agent/pull/8285))
|
||||
- **Profile creation UX** — seed SOUL.md + credential warning ([#8553](https://github.com/NousResearch/hermes-agent/pull/8553))
|
||||
- Shell-aware sudo detection, empty password support ([#6517](https://github.com/NousResearch/hermes-agent/pull/6517))
|
||||
- Flush stdin after curses/terminal menus to prevent escape sequence leakage ([#7167](https://github.com/NousResearch/hermes-agent/pull/7167))
|
||||
- Handle broken stdin in prompt_toolkit startup ([#8560](https://github.com/NousResearch/hermes-agent/pull/8560))
|
||||
|
||||
### Setup & Configuration
|
||||
- **Per-platform display verbosity** configuration ([#8006](https://github.com/NousResearch/hermes-agent/pull/8006))
|
||||
- **Component-separated logging** with session context and filtering ([#7991](https://github.com/NousResearch/hermes-agent/pull/7991))
|
||||
- **`network.force_ipv4`** config to fix IPv6 timeout issues ([#8196](https://github.com/NousResearch/hermes-agent/pull/8196))
|
||||
- **Standardize message whitespace and JSON formatting** ([#7988](https://github.com/NousResearch/hermes-agent/pull/7988))
|
||||
- **Rebrand OpenClaw → Hermes** during migration ([#8210](https://github.com/NousResearch/hermes-agent/pull/8210))
|
||||
- Config.yaml takes priority over env vars for auxiliary settings ([#7889](https://github.com/NousResearch/hermes-agent/pull/7889))
|
||||
- Harden setup provider flows + live OpenRouter catalog refresh ([#7078](https://github.com/NousResearch/hermes-agent/pull/7078))
|
||||
- Normalize reasoning effort ordering across all surfaces ([#6804](https://github.com/NousResearch/hermes-agent/pull/6804))
|
||||
- Remove dead `LLM_MODEL` env var + migration to clear stale entries ([#6543](https://github.com/NousResearch/hermes-agent/pull/6543))
|
||||
- Remove `/prompt` slash command — prefix expansion footgun ([#6752](https://github.com/NousResearch/hermes-agent/pull/6752))
|
||||
- `HERMES_HOME_MODE` env var to override permissions — @ygd58 ([#6993](https://github.com/NousResearch/hermes-agent/pull/6993))
|
||||
- Fall back to default model when model config is empty ([#8303](https://github.com/NousResearch/hermes-agent/pull/8303))
|
||||
- Warn when compression model context is too small ([#7894](https://github.com/NousResearch/hermes-agent/pull/7894))
|
||||
|
||||
---
|
||||
|
||||
## 🔧 Tool System
|
||||
|
||||
### Environments & Execution
|
||||
- **Unified spawn-per-call execution layer** for environments ([#6343](https://github.com/NousResearch/hermes-agent/pull/6343))
|
||||
- **Unified file sync** with mtime tracking, deletion, and transactional state ([#7087](https://github.com/NousResearch/hermes-agent/pull/7087))
|
||||
- **Persistent sandbox envs** survive between turns ([#6412](https://github.com/NousResearch/hermes-agent/pull/6412))
|
||||
- **Bulk file sync** via tar pipe for SSH/Modal backends — @alt-glitch ([#8014](https://github.com/NousResearch/hermes-agent/pull/8014))
|
||||
- **Daytona** — bulk upload, config bridge, silent disk cap ([#7538](https://github.com/NousResearch/hermes-agent/pull/7538))
|
||||
- Foreground timeout cap to prevent session deadlocks ([#7082](https://github.com/NousResearch/hermes-agent/pull/7082))
|
||||
- Guard invalid command values ([#6417](https://github.com/NousResearch/hermes-agent/pull/6417))
|
||||
|
||||
### MCP
|
||||
- **`hermes mcp add --env` and `--preset`** support ([#7970](https://github.com/NousResearch/hermes-agent/pull/7970))
|
||||
- Combine `content` and `structuredContent` when both present ([#7118](https://github.com/NousResearch/hermes-agent/pull/7118))
|
||||
- MCP tool name deconfliction fixes ([#7654](https://github.com/NousResearch/hermes-agent/pull/7654))
|
||||
|
||||
### Browser
|
||||
- Browser hardening — dead code removal, caching, scroll perf, security, thread safety ([#7354](https://github.com/NousResearch/hermes-agent/pull/7354))
|
||||
- `/browser connect` auto-launch uses dedicated Chrome profile dir ([#6821](https://github.com/NousResearch/hermes-agent/pull/6821))
|
||||
- Reap orphaned browser sessions on startup ([#7931](https://github.com/NousResearch/hermes-agent/pull/7931))
|
||||
|
||||
### Voice & Vision
|
||||
- **Voxtral TTS provider** (Mistral AI) ([#7653](https://github.com/NousResearch/hermes-agent/pull/7653))
|
||||
- **TTS speed support** for Edge TTS, OpenAI TTS, MiniMax ([#8666](https://github.com/NousResearch/hermes-agent/pull/8666))
|
||||
- **Vision auto-resize** for oversized images, raise limit to 20 MB, retry-on-failure ([#7883](https://github.com/NousResearch/hermes-agent/pull/7883), [#7902](https://github.com/NousResearch/hermes-agent/pull/7902))
|
||||
- STT provider-model mismatch fix (whisper-1 vs faster-whisper) ([#7113](https://github.com/NousResearch/hermes-agent/pull/7113))
|
||||
|
||||
### Other Tools
|
||||
- **`hermes dump`** command for setup summary ([#6550](https://github.com/NousResearch/hermes-agent/pull/6550))
|
||||
- TODO store enforces ID uniqueness during replace operations ([#7986](https://github.com/NousResearch/hermes-agent/pull/7986))
|
||||
- List all available toolsets in `delegate_task` schema description ([#8231](https://github.com/NousResearch/hermes-agent/pull/8231))
|
||||
- API server: tool progress as custom SSE event to prevent model corruption ([#7500](https://github.com/NousResearch/hermes-agent/pull/7500))
|
||||
- API server: share one Docker container across all conversations ([#7127](https://github.com/NousResearch/hermes-agent/pull/7127))
|
||||
|
||||
---
|
||||
|
||||
## 🧩 Skills Ecosystem
|
||||
|
||||
- **Centralized skills index + tree cache** — eliminates rate-limit failures on install ([#8575](https://github.com/NousResearch/hermes-agent/pull/8575))
|
||||
- **More aggressive skill loading instructions** in system prompt (v3) ([#8209](https://github.com/NousResearch/hermes-agent/pull/8209), [#8286](https://github.com/NousResearch/hermes-agent/pull/8286))
|
||||
- **Google Workspace skill** migrated to GWS CLI backend ([#6788](https://github.com/NousResearch/hermes-agent/pull/6788))
|
||||
- **Creative divergence strategies** skill — @SHL0MS ([#6882](https://github.com/NousResearch/hermes-agent/pull/6882))
|
||||
- **Creative ideation** — constraint-driven project generation — @SHL0MS ([#7555](https://github.com/NousResearch/hermes-agent/pull/7555))
|
||||
- Parallelize skills browse/search to prevent hanging ([#7301](https://github.com/NousResearch/hermes-agent/pull/7301))
|
||||
- Read name from SKILL.md frontmatter in skills_sync ([#7623](https://github.com/NousResearch/hermes-agent/pull/7623))
|
||||
|
||||
---
|
||||
|
||||
## 🔒 Security & Reliability
|
||||
|
||||
### Security Hardening
|
||||
- **Twilio webhook signature validation** — SMS RCE fix ([#7933](https://github.com/NousResearch/hermes-agent/pull/7933))
|
||||
- **Shell injection neutralization** in `_write_to_sandbox` via path quoting ([#7940](https://github.com/NousResearch/hermes-agent/pull/7940))
|
||||
- **Git argument injection** and path traversal prevention in checkpoint manager ([#7944](https://github.com/NousResearch/hermes-agent/pull/7944))
|
||||
- **SSRF redirect bypass** in Slack image uploads + base.py cache helpers ([#7151](https://github.com/NousResearch/hermes-agent/pull/7151))
|
||||
- **Path traversal, credential gate, DANGEROUS_PATTERNS gaps** ([#7156](https://github.com/NousResearch/hermes-agent/pull/7156))
|
||||
- **API bind guard** — enforce `API_SERVER_KEY` for non-loopback binding ([#7455](https://github.com/NousResearch/hermes-agent/pull/7455))
|
||||
- **Approval button authorization** — require auth for session continuation — @Cafexss ([#6930](https://github.com/NousResearch/hermes-agent/pull/6930))
|
||||
- Path boundary enforcement in skill manager operations ([#7156](https://github.com/NousResearch/hermes-agent/pull/7156))
|
||||
- DingTalk/API webhook URL origin validation, header injection rejection ([#7455](https://github.com/NousResearch/hermes-agent/pull/7455))
|
||||
|
||||
### Reliability
|
||||
- **Contextual error diagnostics** for invalid API responses ([#8565](https://github.com/NousResearch/hermes-agent/pull/8565))
|
||||
- **Prevent 400 format errors** from triggering compression loop on Codex ([#6751](https://github.com/NousResearch/hermes-agent/pull/6751))
|
||||
- **Don't halve context_length** on output-cap-too-large errors — @KUSH42 ([#6664](https://github.com/NousResearch/hermes-agent/pull/6664))
|
||||
- **Recover primary client** on OpenAI transport errors ([#7108](https://github.com/NousResearch/hermes-agent/pull/7108))
|
||||
- **Credential pool rotation** on billing-classified 400s ([#7112](https://github.com/NousResearch/hermes-agent/pull/7112))
|
||||
- **Auto-increase stream read timeout** for local LLM providers ([#6967](https://github.com/NousResearch/hermes-agent/pull/6967))
|
||||
- **Fall back to default certs** when CA bundle path doesn't exist ([#7352](https://github.com/NousResearch/hermes-agent/pull/7352))
|
||||
- **Disambiguate usage-limit patterns** in error classifier — @sprmn24 ([#6836](https://github.com/NousResearch/hermes-agent/pull/6836))
|
||||
- Harden cron script timeout and provider recovery ([#7079](https://github.com/NousResearch/hermes-agent/pull/7079))
|
||||
- Gateway interrupt detection resilient to monitor task failures ([#8208](https://github.com/NousResearch/hermes-agent/pull/8208))
|
||||
- Prevent unwanted session auto-reset after graceful gateway restarts ([#8299](https://github.com/NousResearch/hermes-agent/pull/8299))
|
||||
- Prevent duplicate update prompt spam in gateway watcher ([#8343](https://github.com/NousResearch/hermes-agent/pull/8343))
|
||||
- Deduplicate reasoning items in Responses API input ([#7946](https://github.com/NousResearch/hermes-agent/pull/7946))
|
||||
|
||||
### Infrastructure
|
||||
- **Multi-arch Docker image** — amd64 + arm64 ([#6124](https://github.com/NousResearch/hermes-agent/pull/6124))
|
||||
- **Docker runs as non-root user** with virtualenv — @benbarclay contributing ([#8226](https://github.com/NousResearch/hermes-agent/pull/8226))
|
||||
- **Use `uv`** for Docker dependency resolution to fix resolution-too-deep ([#6965](https://github.com/NousResearch/hermes-agent/pull/6965))
|
||||
- **Container-aware Nix CLI** — auto-route into managed container — @alt-glitch ([#7543](https://github.com/NousResearch/hermes-agent/pull/7543))
|
||||
- **Nix shared-state permission model** for interactive CLI users — @alt-glitch ([#6796](https://github.com/NousResearch/hermes-agent/pull/6796))
|
||||
- **Per-profile subprocess HOME isolation** ([#7357](https://github.com/NousResearch/hermes-agent/pull/7357))
|
||||
- Profile paths fixed in Docker — profiles go to mounted volume ([#7170](https://github.com/NousResearch/hermes-agent/pull/7170))
|
||||
- Docker container gateway pathway hardened ([#8614](https://github.com/NousResearch/hermes-agent/pull/8614))
|
||||
- Enable unbuffered stdout for live Docker logs ([#6749](https://github.com/NousResearch/hermes-agent/pull/6749))
|
||||
- Install procps in Docker image — @HiddenPuppy ([#7032](https://github.com/NousResearch/hermes-agent/pull/7032))
|
||||
- Shallow git clone for faster installation — @sosyz ([#8396](https://github.com/NousResearch/hermes-agent/pull/8396))
|
||||
- `hermes update` always reset on stash conflict ([#7010](https://github.com/NousResearch/hermes-agent/pull/7010))
|
||||
- Write update exit code before gateway restart (cgroup kill race) ([#8288](https://github.com/NousResearch/hermes-agent/pull/8288))
|
||||
- Nix: `setupSecrets` optional, tirith runtime dep — @devorun, @ethernet8023 ([#6261](https://github.com/NousResearch/hermes-agent/pull/6261), [#6721](https://github.com/NousResearch/hermes-agent/pull/6721))
|
||||
- launchd stop uses `bootout` so `KeepAlive` doesn't respawn ([#7119](https://github.com/NousResearch/hermes-agent/pull/7119))
|
||||
|
||||
---
|
||||
|
||||
## 🐛 Notable Bug Fixes
|
||||
|
||||
- Fix: `/model` switch not persisting across gateway messages ([#7081](https://github.com/NousResearch/hermes-agent/pull/7081))
|
||||
- Fix: session-scoped gateway model overrides ignored — @Hygaard ([#7662](https://github.com/NousResearch/hermes-agent/pull/7662))
|
||||
- Fix: compaction model context length ignoring config — 3 related issues ([#8258](https://github.com/NousResearch/hermes-agent/pull/8258), [#8107](https://github.com/NousResearch/hermes-agent/pull/8107))
|
||||
- Fix: OpenCode.ai context window resolved to 128K instead of 1M ([#6472](https://github.com/NousResearch/hermes-agent/pull/6472))
|
||||
- Fix: Codex fallback auth-store lookup — @cherifya ([#6462](https://github.com/NousResearch/hermes-agent/pull/6462))
|
||||
- Fix: duplicate completion notifications when process killed ([#7124](https://github.com/NousResearch/hermes-agent/pull/7124))
|
||||
- Fix: agent daemon thread prevents orphan CLI processes on tab close ([#8557](https://github.com/NousResearch/hermes-agent/pull/8557))
|
||||
- Fix: stale image attachment on text paste and voice input ([#7077](https://github.com/NousResearch/hermes-agent/pull/7077))
|
||||
- Fix: DM thread session seeding causing cross-thread contamination ([#7084](https://github.com/NousResearch/hermes-agent/pull/7084))
|
||||
- Fix: OpenClaw migration shows dry-run preview before executing ([#6769](https://github.com/NousResearch/hermes-agent/pull/6769))
|
||||
- Fix: auth errors misclassified as retryable — @kuishou68 ([#7027](https://github.com/NousResearch/hermes-agent/pull/7027))
|
||||
- Fix: Copilot-Integration-Id header missing ([#7083](https://github.com/NousResearch/hermes-agent/pull/7083))
|
||||
- Fix: ACP session capabilities — @luyao618 ([#6985](https://github.com/NousResearch/hermes-agent/pull/6985))
|
||||
- Fix: ACP PromptResponse usage from top-level fields ([#7086](https://github.com/NousResearch/hermes-agent/pull/7086))
|
||||
- Fix: several failing/flaky tests on main — @dsocolobsky ([#6777](https://github.com/NousResearch/hermes-agent/pull/6777))
|
||||
- Fix: backup marker filenames — @sprmn24 ([#8600](https://github.com/NousResearch/hermes-agent/pull/8600))
|
||||
- Fix: `NoneType` in fast_mode check — @0xbyt4 ([#7350](https://github.com/NousResearch/hermes-agent/pull/7350))
|
||||
- Fix: missing imports in uninstall.py — @JiayuuWang ([#7034](https://github.com/NousResearch/hermes-agent/pull/7034))
|
||||
|
||||
---
|
||||
|
||||
## 📚 Documentation
|
||||
|
||||
- Platform adapter developer guide + WeCom Callback docs ([#7969](https://github.com/NousResearch/hermes-agent/pull/7969))
|
||||
- Cron troubleshooting guide ([#7122](https://github.com/NousResearch/hermes-agent/pull/7122))
|
||||
- Streaming timeout auto-detection for local LLMs ([#6990](https://github.com/NousResearch/hermes-agent/pull/6990))
|
||||
- Tool-use enforcement documentation expanded ([#7984](https://github.com/NousResearch/hermes-agent/pull/7984))
|
||||
- BlueBubbles pairing instructions ([#6548](https://github.com/NousResearch/hermes-agent/pull/6548))
|
||||
- Telegram proxy support section ([#6348](https://github.com/NousResearch/hermes-agent/pull/6348))
|
||||
- `hermes dump` and `hermes logs` CLI reference ([#6552](https://github.com/NousResearch/hermes-agent/pull/6552))
|
||||
- `tool_progress_overrides` configuration reference ([#6364](https://github.com/NousResearch/hermes-agent/pull/6364))
|
||||
- Compression model context length warning docs ([#7879](https://github.com/NousResearch/hermes-agent/pull/7879))
|
||||
|
||||
---
|
||||
|
||||
## 👥 Contributors
|
||||
|
||||
**269 merged PRs** from **24 contributors** across **487 commits**.
|
||||
|
||||
### Community Contributors
|
||||
- **@alt-glitch** (6 PRs) — Nix container-aware CLI, shared-state permissions, Matrix SQLite crypto store, bulk SSH/Modal file sync, Matrix mautrix compat
|
||||
- **@SHL0MS** (2 PRs) — Creative divergence strategies skill, creative ideation skill
|
||||
- **@sprmn24** (2 PRs) — Error classifier disambiguation, backup marker fix
|
||||
- **@nicoloboschi** — Hindsight memory plugin feature parity
|
||||
- **@Hygaard** — Session-scoped gateway model override fix
|
||||
- **@jarvis-phw** — Discord allowed_channels whitelist
|
||||
- **@Kathie-yu** — Honcho initOnSessionStart for tools mode
|
||||
- **@hermes-agent-dhabibi** — Discord forum channel topic inheritance
|
||||
- **@kira-ariaki** — Discord .log attachments and size limit
|
||||
- **@cherifya** — Codex fallback auth-store lookup
|
||||
- **@Cafexss** — Security: auth for session continuation
|
||||
- **@KUSH42** — Compaction context_length fix
|
||||
- **@kuishou68** — Auth error retryable classification fix
|
||||
- **@luyao618** — ACP session capabilities
|
||||
- **@ygd58** — HERMES_HOME_MODE env var override
|
||||
- **@0xbyt4** — Fast mode NoneType fix
|
||||
- **@JiayuuWang** — CLI uninstall import fix
|
||||
- **@HiddenPuppy** — Docker procps installation
|
||||
- **@dsocolobsky** — Test suite fixes
|
||||
- **@bobashopcashier** (1 PR) — Graceful gateway drain before restart (salvaged into #7503 from #7290)
|
||||
- **@benbarclay** — Docker image tag simplification
|
||||
- **@sosyz** — Shallow git clone for faster install
|
||||
- **@devorun** — Nix setupSecrets optional
|
||||
- **@ethernet8023** — Nix tirith runtime dep
|
||||
|
||||
---
|
||||
|
||||
**Full Changelog**: [v2026.4.8...v2026.4.13](https://github.com/NousResearch/hermes-agent/compare/v2026.4.8...v2026.4.13)
|
||||
84
SECURITY.md
84
SECURITY.md
@@ -1,84 +0,0 @@
|
||||
# Hermes Agent Security Policy
|
||||
|
||||
This document outlines the security protocols, trust model, and deployment hardening guidelines for the **Hermes Agent** project.
|
||||
|
||||
## 1. Vulnerability Reporting
|
||||
|
||||
Hermes Agent does **not** operate a bug bounty program. Security issues should be reported via [GitHub Security Advisories (GHSA)](https://github.com/NousResearch/hermes-agent/security/advisories/new) or by emailing **security@nousresearch.com**. Do not open public issues for security vulnerabilities.
|
||||
|
||||
### Required Submission Details
|
||||
- **Title & Severity:** Concise description and CVSS score/rating.
|
||||
- **Affected Component:** Exact file path and line range (e.g., `tools/approval.py:120-145`).
|
||||
- **Environment:** Output of `hermes version`, commit SHA, OS, and Python version.
|
||||
- **Reproduction:** Step-by-step Proof-of-Concept (PoC) against `main` or the latest release.
|
||||
- **Impact:** Explanation of what trust boundary was crossed.
|
||||
|
||||
---
|
||||
|
||||
## 2. Trust Model
|
||||
|
||||
The core assumption is that Hermes is a **personal agent** with one trusted operator.
|
||||
|
||||
### Operator & Session Trust
|
||||
- **Single Tenant:** The system protects the operator from LLM actions, not from malicious co-tenants. Multi-user isolation must happen at the OS/host level.
|
||||
- **Gateway Security:** Authorized callers (Telegram, Discord, Slack, etc.) receive equal trust. Session keys are used for routing, not as authorization boundaries.
|
||||
- **Execution:** Defaults to `terminal.backend: local` (direct host execution). Container isolation (Docker, Modal, Daytona) is opt-in for sandboxing.
|
||||
|
||||
### Dangerous Command Approval
|
||||
The approval system (`tools/approval.py`) is a core security boundary. Terminal commands, file operations, and other potentially destructive actions are gated behind explicit user confirmation before execution. The approval mode is configurable via `approvals.mode` in `config.yaml`:
|
||||
- `"on"` (default) — prompts the user to approve dangerous commands.
|
||||
- `"auto"` — auto-approves after a configurable delay.
|
||||
- `"off"` — disables the gate entirely (break-glass; see Section 3).
|
||||
|
||||
### Output Redaction
|
||||
`agent/redact.py` strips secret-like patterns (API keys, tokens, credentials) from all display output before it reaches the terminal or gateway platform. This prevents accidental credential leakage in chat logs, tool previews, and response text. Redaction operates on the display layer only — underlying values remain intact for internal agent operations.
|
||||
|
||||
### Skills vs. MCP Servers
|
||||
- **Installed Skills:** High trust. Equivalent to local host code; skills can read environment variables and run arbitrary commands.
|
||||
- **MCP Servers:** Lower trust. MCP subprocesses receive a filtered environment (`_build_safe_env()` in `tools/mcp_tool.py`) — only safe baseline variables (`PATH`, `HOME`, `XDG_*`) plus variables explicitly declared in the server's `env` config block are passed through. Host credentials are stripped by default. Additionally, packages invoked via `npx`/`uvx` are checked against the OSV malware database before spawning.
|
||||
|
||||
### Code Execution Sandbox
|
||||
The `execute_code` tool (`tools/code_execution_tool.py`) runs LLM-generated Python scripts in a child process with API keys and tokens stripped from the environment to prevent credential exfiltration. Only environment variables explicitly declared by loaded skills (via `env_passthrough`) or by the user in `config.yaml` (`terminal.env_passthrough`) are passed through. The child accesses Hermes tools via RPC, not direct API calls.
|
||||
|
||||
### Subagents
|
||||
- **No recursive delegation:** The `delegate_task` tool is disabled for child agents.
|
||||
- **Depth limit:** `MAX_DEPTH = 2` — parent (depth 0) can spawn a child (depth 1); grandchildren are rejected.
|
||||
- **Memory isolation:** Subagents run with `skip_memory=True` and do not have access to the parent's persistent memory provider. The parent receives only the task prompt and final response as an observation.
|
||||
|
||||
---
|
||||
|
||||
## 3. Out of Scope (Non-Vulnerabilities)
|
||||
|
||||
The following scenarios are **not** considered security breaches:
|
||||
- **Prompt Injection:** Unless it results in a concrete bypass of the approval system, toolset restrictions, or container sandbox.
|
||||
- **Public Exposure:** Deploying the gateway to the public internet without external authentication or network protection.
|
||||
- **Trusted State Access:** Reports that require pre-existing write access to `~/.hermes/`, `.env`, or `config.yaml` (these are operator-owned files).
|
||||
- **Default Behavior:** Host-level command execution when `terminal.backend` is set to `local` — this is the documented default, not a vulnerability.
|
||||
- **Configuration Trade-offs:** Intentional break-glass settings such as `approvals.mode: "off"` or `terminal.backend: local` in production.
|
||||
- **Tool-level read/access restrictions:** The agent has unrestricted shell access via the `terminal` tool by design. Reports that a specific tool (e.g., `read_file`) can access a resource are not vulnerabilities if the same access is available through `terminal`. Tool-level deny lists only constitute a meaningful security boundary when paired with equivalent restrictions on the terminal side (as with write operations, where `WRITE_DENIED_PATHS` is paired with the dangerous command approval system).
|
||||
|
||||
---
|
||||
|
||||
## 4. Deployment Hardening & Best Practices
|
||||
|
||||
### Filesystem & Network
|
||||
- **Production sandboxing:** Use container backends (`docker`, `modal`, `daytona`) instead of `local` for untrusted workloads.
|
||||
- **File permissions:** Run as non-root (the Docker image uses UID 10000); protect credentials with `chmod 600 ~/.hermes/.env` on local installs.
|
||||
- **Network exposure:** Do not expose the gateway or API server to the public internet without VPN, Tailscale, or firewall protection. SSRF protection is enabled by default across all gateway platform adapters (Telegram, Discord, Slack, Matrix, Mattermost, etc.) with redirect validation. Note: the local terminal backend does not apply SSRF filtering, as it operates within the trusted operator's environment.
|
||||
|
||||
### Skills & Supply Chain
|
||||
- **Skill installation:** Review Skills Guard reports (`tools/skills_guard.py`) before installing third-party skills. The audit log at `~/.hermes/skills/.hub/audit.log` tracks every install and removal.
|
||||
- **MCP safety:** OSV malware checking runs automatically for `npx`/`uvx` packages before MCP server processes are spawned.
|
||||
- **CI/CD:** GitHub Actions are pinned to full commit SHAs. The `supply-chain-audit.yml` workflow blocks PRs containing `.pth` files or suspicious `base64`+`exec` patterns.
|
||||
|
||||
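
As a sketch of the OSV lookup described in the MCP-safety bullet (the real check lives in Hermes's MCP tooling; this standalone version only shows the public OSV API shape):

```python
import httpx

def osv_findings(package: str, version: str, ecosystem: str = "npm") -> list[dict]:
    """Query the public OSV database for known advisories against a package version."""
    response = httpx.post(
        "https://api.osv.dev/v1/query",
        json={"package": {"name": package, "ecosystem": ecosystem}, "version": version},
        timeout=10.0,
    )
    response.raise_for_status()
    return response.json().get("vulns", [])

# Example (hypothetical package name): refuse to spawn an MCP server whose
# npx package has known advisories.
# if osv_findings("some-mcp-server", "1.2.3"):
#     raise RuntimeError("Package has known OSV advisories; refusing to launch")
```
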
### Credential Storage

- API keys and tokens belong exclusively in `~/.hermes/.env` — never in `config.yaml` and never checked into version control.
- The credential pool system (`agent/credential_pool.py`) handles key rotation and fallback. Credentials are resolved from environment variables, not stored in plaintext databases. A minimal rotation sketch follows this list.
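
A minimal rotation/fallback sketch in the spirit of the credential pool described above; the environment variable names are examples, and the real `agent/credential_pool.py` is more involved:

```python
import itertools
import os

class CredentialPool:
    """Round-robin over keys resolved from the environment, skipping failed ones."""

    def __init__(self, env_vars: list[str]):
        keys = [os.environ[name] for name in env_vars if os.environ.get(name)]
        if not keys:
            raise RuntimeError("No credentials found in the environment")
        self._cycle = itertools.cycle(keys)
        self._failed: set[str] = set()
        self._total = len(keys)

    def next_key(self) -> str:
        # Try each distinct key at most once per call before giving up.
        for _ in range(self._total):
            key = next(self._cycle)
            if key not in self._failed:
                return key
        raise RuntimeError("All credentials in the pool have failed")

    def mark_failed(self, key: str) -> None:
        self._failed.add(key)

# Example usage with hypothetical variable names:
# pool = CredentialPool(["OPENROUTER_API_KEY", "OPENROUTER_API_KEY_FALLBACK"])
```
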
---

## 5. Disclosure Process

- **Coordinated Disclosure:** 90-day window or until a fix is released, whichever comes first.
- **Communication:** All updates occur via the GHSA thread or email correspondence with security@nousresearch.com.
- **Credits:** Reporters are credited in release notes unless anonymity is requested.

@@ -15,51 +15,12 @@ Usage::
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from hermes_constants import get_hermes_home
|
||||
|
||||
|
||||
# Methods clients send as periodic liveness probes. They are not part of the
|
||||
# ACP schema, so the acp router correctly returns JSON-RPC -32601 to the
|
||||
# caller — but the supervisor task that dispatches the request then surfaces
|
||||
# the raised RequestError via ``logging.exception("Background task failed")``,
|
||||
# which dumps a traceback to stderr every probe interval. Clients like
|
||||
# acp-bridge already treat the -32601 response as "agent alive", so the
|
||||
# traceback is pure noise. We keep the protocol response intact and only
|
||||
# silence the stderr noise for this specific benign case.
|
||||
_BENIGN_PROBE_METHODS = frozenset({"ping", "health", "healthcheck"})
|
||||
|
||||
|
||||
class _BenignProbeMethodFilter(logging.Filter):
|
||||
"""Suppress acp 'Background task failed' tracebacks caused by unknown
|
||||
liveness-probe methods (e.g. ``ping``) while leaving every other
|
||||
background-task error — including method_not_found for any non-probe
|
||||
method — visible in stderr.
|
||||
"""
|
||||
|
||||
def filter(self, record: logging.LogRecord) -> bool:
|
||||
if record.getMessage() != "Background task failed":
|
||||
return True
|
||||
exc_info = record.exc_info
|
||||
if not exc_info:
|
||||
return True
|
||||
exc = exc_info[1]
|
||||
# Imported lazily so this module stays importable when the optional
|
||||
# ``agent-client-protocol`` dependency is not installed.
|
||||
try:
|
||||
from acp.exceptions import RequestError
|
||||
except ImportError:
|
||||
return True
|
||||
if not isinstance(exc, RequestError):
|
||||
return True
|
||||
if getattr(exc, "code", None) != -32601:
|
||||
return True
|
||||
data = getattr(exc, "data", None)
|
||||
method = data.get("method") if isinstance(data, dict) else None
|
||||
return method not in _BENIGN_PROBE_METHODS
|
||||
|
||||
|
||||
def _setup_logging() -> None:
|
||||
"""Route all logging to stderr so stdout stays clean for ACP stdio."""
|
||||
handler = logging.StreamHandler(sys.stderr)
|
||||
@@ -69,7 +30,6 @@ def _setup_logging() -> None:
|
||||
datefmt="%Y-%m-%d %H:%M:%S",
|
||||
)
|
||||
)
|
||||
handler.addFilter(_BenignProbeMethodFilter())
|
||||
root = logging.getLogger()
|
||||
root.handlers.clear()
|
||||
root.addHandler(handler)
|
||||
@@ -112,17 +72,6 @@ def main() -> None:
|
||||
import acp
|
||||
from .server import HermesACPAgent
|
||||
|
||||
# MCP tool discovery from config.yaml — run before asyncio.run() so
|
||||
# it's safe to use blocking waits. (ACP also registers per-session
|
||||
# MCP servers dynamically via asyncio.to_thread inside the event
|
||||
# loop; that path is unaffected.) Moved from model_tools.py module
|
||||
# scope to avoid freezing the gateway's loop on lazy import (#16856).
|
||||
try:
|
||||
from tools.mcp_tool import discover_mcp_tools
|
||||
discover_mcp_tools()
|
||||
except Exception:
|
||||
logger.debug("MCP tool discovery failed at ACP startup", exc_info=True)
|
||||
|
||||
agent = HermesACPAgent()
|
||||
try:
|
||||
asyncio.run(acp.run_agent(agent, use_unstable_protocol=True))
|
||||
|
||||
@@ -49,24 +49,19 @@ def make_tool_progress_cb(
|
||||
session_id: str,
|
||||
loop: asyncio.AbstractEventLoop,
|
||||
tool_call_ids: Dict[str, Deque[str]],
|
||||
tool_call_meta: Dict[str, Dict[str, Any]],
|
||||
) -> Callable:
|
||||
"""Create a ``tool_progress_callback`` for AIAgent.
|
||||
|
||||
Signature expected by AIAgent::
|
||||
|
||||
tool_progress_callback(event_type: str, name: str, preview: str, args: dict, **kwargs)
|
||||
tool_progress_callback(name: str, preview: str, args: dict)
|
||||
|
||||
Emits ``ToolCallStart`` for ``tool.started`` events and tracks IDs in a FIFO
|
||||
Emits ``ToolCallStart`` for each tool invocation and tracks IDs in a FIFO
|
||||
queue per tool name so duplicate/parallel same-name calls still complete
|
||||
against the correct ACP tool call. Other event types (``tool.completed``,
|
||||
``reasoning.available``) are silently ignored.
|
||||
against the correct ACP tool call.
|
||||
"""
|
||||
|
||||
def _tool_progress(event_type: str, name: str = None, preview: str = None, args: Any = None, **kwargs) -> None:
|
||||
# Only emit ACP ToolCallStart for tool.started; ignore other event types
|
||||
if event_type != "tool.started":
|
||||
return
|
||||
def _tool_progress(name: str, preview: str, args: Any = None) -> None:
|
||||
if isinstance(args, str):
|
||||
try:
|
||||
args = json.loads(args)
|
||||
@@ -85,16 +80,6 @@ def make_tool_progress_cb(
|
||||
tool_call_ids[name] = queue
|
||||
queue.append(tc_id)
|
||||
|
||||
snapshot = None
|
||||
if name in {"write_file", "patch", "skill_manage"}:
|
||||
try:
|
||||
from agent.display import capture_local_edit_snapshot
|
||||
|
||||
snapshot = capture_local_edit_snapshot(name, args)
|
||||
except Exception:
|
||||
logger.debug("Failed to capture ACP edit snapshot for %s", name, exc_info=True)
|
||||
tool_call_meta[tc_id] = {"args": args, "snapshot": snapshot}
|
||||
|
||||
update = build_tool_start(tc_id, name, args)
|
||||
_send_update(conn, session_id, loop, update)
|
||||
|
||||
@@ -130,7 +115,6 @@ def make_step_cb(
|
||||
session_id: str,
|
||||
loop: asyncio.AbstractEventLoop,
|
||||
tool_call_ids: Dict[str, Deque[str]],
|
||||
tool_call_meta: Dict[str, Dict[str, Any]],
|
||||
) -> Callable:
|
||||
"""Create a ``step_callback`` for AIAgent.
|
||||
|
||||
@@ -144,12 +128,10 @@ def make_step_cb(
|
||||
for tool_info in prev_tools:
|
||||
tool_name = None
|
||||
result = None
|
||||
function_args = None
|
||||
|
||||
if isinstance(tool_info, dict):
|
||||
tool_name = tool_info.get("name") or tool_info.get("function_name")
|
||||
result = tool_info.get("result") or tool_info.get("output")
|
||||
function_args = tool_info.get("arguments") or tool_info.get("args")
|
||||
elif isinstance(tool_info, str):
|
||||
tool_name = tool_info
|
||||
|
||||
@@ -159,13 +141,8 @@ def make_step_cb(
|
||||
tool_call_ids[tool_name] = queue
|
||||
if tool_name and queue:
|
||||
tc_id = queue.popleft()
|
||||
meta = tool_call_meta.pop(tc_id, {})
|
||||
update = build_tool_complete(
|
||||
tc_id,
|
||||
tool_name,
|
||||
result=str(result) if result is not None else None,
|
||||
function_args=function_args or meta.get("args"),
|
||||
snapshot=meta.get("snapshot"),
|
||||
tc_id, tool_name, result=str(result) if result is not None else None
|
||||
)
|
||||
_send_update(conn, session_id, loop, update)
|
||||
if not queue:
|
||||
|
||||
@@ -63,9 +63,6 @@ def make_approval_callback(
|
||||
logger.warning("Permission request timed out or failed: %s", exc)
|
||||
return "deny"
|
||||
|
||||
if response is None:
|
||||
return "deny"
|
||||
|
||||
outcome = response.outcome
|
||||
if isinstance(outcome, AllowedOutcome):
|
||||
option_id = outcome.option_id
|
||||
|
||||
File diff suppressed because it is too large
@@ -13,12 +13,7 @@ from hermes_constants import get_hermes_home
|
||||
import copy
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
from datetime import datetime, timezone
|
||||
from dataclasses import dataclass, field
|
||||
from threading import Lock
|
||||
from typing import Any, Dict, List, Optional
|
||||
@@ -26,135 +21,17 @@ from typing import Any, Dict, List, Optional
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _win_path_to_wsl(path: str) -> str | None:
|
||||
"""Convert a Windows drive path to its WSL /mnt/<drive>/... equivalent."""
|
||||
match = re.match(r"^([A-Za-z]):[\\/](.*)$", path)
|
||||
if not match:
|
||||
return None
|
||||
drive = match.group(1).lower()
|
||||
tail = match.group(2).replace("\\", "/")
|
||||
return f"/mnt/{drive}/{tail}"
|
||||
|
||||
|
||||
def _translate_acp_cwd(cwd: str) -> str:
|
||||
"""Translate Windows ACP cwd values when Hermes itself is running in WSL.
|
||||
|
||||
Windows ACP clients can launch ``hermes acp`` inside WSL while still sending
|
||||
editor workspaces as Windows drive paths such as ``E:\\Projects``. Store
|
||||
and execute against the WSL mount path so agents, tools, and persisted ACP
|
||||
sessions all agree on the usable workspace. Native Linux/macOS keeps the
|
||||
original cwd unchanged.
|
||||
"""
|
||||
from hermes_constants import is_wsl
|
||||
|
||||
if not is_wsl():
|
||||
return cwd
|
||||
translated = _win_path_to_wsl(str(cwd))
|
||||
return translated if translated is not None else cwd
|
||||
|
||||
|
||||
def _normalize_cwd_for_compare(cwd: str | None) -> str:
|
||||
raw = str(cwd or ".").strip()
|
||||
if not raw:
|
||||
raw = "."
|
||||
expanded = os.path.expanduser(raw)
|
||||
|
||||
# Normalize Windows drive paths into the equivalent WSL mount form so
|
||||
# ACP history filters match the same workspace across Windows and WSL.
|
||||
translated = _win_path_to_wsl(expanded)
|
||||
if translated is not None:
|
||||
expanded = translated
|
||||
elif re.match(r"^/mnt/[A-Za-z]/", expanded):
|
||||
expanded = f"/mnt/{expanded[5].lower()}/{expanded[7:]}"
|
||||
|
||||
return os.path.normpath(expanded)
|
||||
|
||||
|
||||
def _build_session_title(title: Any, preview: Any, cwd: str | None) -> str:
|
||||
explicit = str(title or "").strip()
|
||||
if explicit:
|
||||
return explicit
|
||||
preview_text = str(preview or "").strip()
|
||||
if preview_text:
|
||||
return preview_text
|
||||
leaf = os.path.basename(str(cwd or "").rstrip("/\\"))
|
||||
return leaf or "New thread"
|
||||
|
||||
|
||||
def _format_updated_at(value: Any) -> str | None:
|
||||
if value is None:
|
||||
return None
|
||||
if isinstance(value, str) and value.strip():
|
||||
return value
|
||||
try:
|
||||
return datetime.fromtimestamp(float(value), tz=timezone.utc).isoformat()
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def _updated_at_sort_key(value: Any) -> float:
|
||||
if value is None:
|
||||
return float("-inf")
|
||||
if isinstance(value, (int, float)):
|
||||
return float(value)
|
||||
raw = str(value).strip()
|
||||
if not raw:
|
||||
return float("-inf")
|
||||
try:
|
||||
return datetime.fromisoformat(raw.replace("Z", "+00:00")).timestamp()
|
||||
except Exception:
|
||||
try:
|
||||
return float(raw)
|
||||
except Exception:
|
||||
return float("-inf")
|
||||
|
||||
|
||||
def _acp_stderr_print(*args, **kwargs) -> None:
|
||||
"""Best-effort human-readable output sink for ACP stdio sessions.
|
||||
|
||||
ACP reserves stdout for JSON-RPC frames, so any incidental CLI/status output
|
||||
from AIAgent must be redirected away from stdout. Route it to stderr instead.
|
||||
"""
|
||||
kwargs = dict(kwargs)
|
||||
kwargs.setdefault("file", sys.stderr)
|
||||
print(*args, **kwargs)
|
||||
|
||||
|
||||
def _register_task_cwd(task_id: str, cwd: str) -> None:
|
||||
"""Bind a task/session id to the editor's working directory for tools.
|
||||
|
||||
Zed can launch Hermes from a Windows workspace while the ACP process runs
|
||||
inside WSL. In that case ACP sends cwd as e.g. ``E:\\Projects\\POTI``;
|
||||
local tools need the WSL mount equivalent or subprocess creation fails
|
||||
before the command can run.
|
||||
"""
|
||||
"""Bind a task/session id to the editor's working directory for tools."""
|
||||
if not task_id:
|
||||
return
|
||||
try:
|
||||
from tools.terminal_tool import register_task_env_overrides
|
||||
register_task_env_overrides(task_id, {"cwd": _translate_acp_cwd(cwd)})
|
||||
register_task_env_overrides(task_id, {"cwd": cwd})
|
||||
except Exception:
|
||||
logger.debug("Failed to register ACP task cwd override", exc_info=True)
|
||||
|
||||
|
||||
def _expand_acp_enabled_toolsets(
|
||||
toolsets: List[str] | None = None,
|
||||
mcp_server_names: List[str] | None = None,
|
||||
) -> List[str]:
|
||||
"""Return ACP toolsets plus explicit MCP server toolsets for this session."""
|
||||
expanded: List[str] = []
|
||||
for name in list(toolsets or ["hermes-acp"]):
|
||||
if name and name not in expanded:
|
||||
expanded.append(name)
|
||||
|
||||
for server_name in list(mcp_server_names or []):
|
||||
toolset_name = f"mcp-{server_name}"
|
||||
if server_name and toolset_name not in expanded:
|
||||
expanded.append(toolset_name)
|
||||
|
||||
return expanded
|
||||
|
||||
|
||||
def _clear_task_cwd(task_id: str) -> None:
|
||||
"""Remove task-specific cwd overrides for an ACP session."""
|
||||
if not task_id:
|
||||
@@ -176,11 +53,6 @@ class SessionState:
|
||||
model: str = ""
|
||||
history: List[Dict[str, Any]] = field(default_factory=list)
|
||||
cancel_event: Any = None # threading.Event
|
||||
is_running: bool = False
|
||||
queued_prompts: List[str] = field(default_factory=list)
|
||||
runtime_lock: Any = field(default_factory=Lock)
|
||||
current_prompt_text: str = ""
|
||||
interrupted_prompt_text: str = ""
|
||||
|
||||
|
||||
class SessionManager:
|
||||
@@ -211,7 +83,6 @@ class SessionManager:
|
||||
"""Create a new session with a unique ID and a fresh AIAgent."""
|
||||
import threading
|
||||
|
||||
cwd = _translate_acp_cwd(cwd)
|
||||
session_id = str(uuid.uuid4())
|
||||
agent = self._make_agent(session_id=session_id, cwd=cwd)
|
||||
state = SessionState(
|
||||
@@ -254,7 +125,6 @@ class SessionManager:
|
||||
"""Deep-copy a session's history into a new session."""
|
||||
import threading
|
||||
|
||||
cwd = _translate_acp_cwd(cwd)
|
||||
original = self.get_session(session_id) # checks DB too
|
||||
if original is None:
|
||||
return None
|
||||
@@ -280,83 +150,51 @@ class SessionManager:
|
||||
logger.info("Forked ACP session %s -> %s", session_id, new_id)
|
||||
return state
|
||||
|
||||
def list_sessions(self, cwd: str | None = None) -> List[Dict[str, Any]]:
|
||||
def list_sessions(self) -> List[Dict[str, Any]]:
|
||||
"""Return lightweight info dicts for all sessions (memory + database)."""
|
||||
normalized_cwd = _normalize_cwd_for_compare(cwd) if cwd else None
|
||||
db = self._get_db()
|
||||
persisted_rows: dict[str, dict[str, Any]] = {}
|
||||
|
||||
if db is not None:
|
||||
try:
|
||||
for row in db.list_sessions_rich(source="acp", limit=1000):
|
||||
persisted_rows[str(row["id"])] = dict(row)
|
||||
except Exception:
|
||||
logger.debug("Failed to load ACP sessions from DB", exc_info=True)
|
||||
|
||||
# Collect in-memory sessions first.
|
||||
with self._lock:
|
||||
seen_ids = set(self._sessions.keys())
|
||||
results = []
|
||||
for s in self._sessions.values():
|
||||
history_len = len(s.history)
|
||||
if history_len <= 0:
|
||||
continue
|
||||
if normalized_cwd and _normalize_cwd_for_compare(s.cwd) != normalized_cwd:
|
||||
continue
|
||||
persisted = persisted_rows.get(s.session_id, {})
|
||||
preview = next(
|
||||
(
|
||||
str(msg.get("content") or "").strip()
|
||||
for msg in s.history
|
||||
if msg.get("role") == "user" and str(msg.get("content") or "").strip()
|
||||
),
|
||||
persisted.get("preview") or "",
|
||||
)
|
||||
results.append(
|
||||
{
|
||||
"session_id": s.session_id,
|
||||
"cwd": s.cwd,
|
||||
"model": s.model,
|
||||
"history_len": history_len,
|
||||
"title": _build_session_title(persisted.get("title"), preview, s.cwd),
|
||||
"updated_at": _format_updated_at(
|
||||
persisted.get("last_active") or persisted.get("started_at") or time.time()
|
||||
),
|
||||
}
|
||||
)
|
||||
results = [
|
||||
{
|
||||
"session_id": s.session_id,
|
||||
"cwd": s.cwd,
|
||||
"model": s.model,
|
||||
"history_len": len(s.history),
|
||||
}
|
||||
for s in self._sessions.values()
|
||||
]
|
||||
|
||||
# Merge any persisted sessions not currently in memory.
|
||||
for sid, row in persisted_rows.items():
|
||||
if sid in seen_ids:
|
||||
continue
|
||||
message_count = int(row.get("message_count") or 0)
|
||||
if message_count <= 0:
|
||||
continue
|
||||
# Extract cwd from model_config JSON.
|
||||
session_cwd = "."
|
||||
mc = row.get("model_config")
|
||||
if mc:
|
||||
try:
|
||||
session_cwd = json.loads(mc).get("cwd", ".")
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
pass
|
||||
if normalized_cwd and _normalize_cwd_for_compare(session_cwd) != normalized_cwd:
|
||||
continue
|
||||
results.append({
|
||||
"session_id": sid,
|
||||
"cwd": session_cwd,
|
||||
"model": row.get("model") or "",
|
||||
"history_len": message_count,
|
||||
"title": _build_session_title(row.get("title"), row.get("preview"), session_cwd),
|
||||
"updated_at": _format_updated_at(row.get("last_active") or row.get("started_at")),
|
||||
})
|
||||
db = self._get_db()
|
||||
if db is not None:
|
||||
try:
|
||||
rows = db.search_sessions(source="acp", limit=1000)
|
||||
for row in rows:
|
||||
sid = row["id"]
|
||||
if sid in seen_ids:
|
||||
continue
|
||||
# Extract cwd from model_config JSON.
|
||||
cwd = "."
|
||||
mc = row.get("model_config")
|
||||
if mc:
|
||||
try:
|
||||
cwd = json.loads(mc).get("cwd", ".")
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
pass
|
||||
results.append({
|
||||
"session_id": sid,
|
||||
"cwd": cwd,
|
||||
"model": row.get("model") or "",
|
||||
"history_len": row.get("message_count") or 0,
|
||||
})
|
||||
except Exception:
|
||||
logger.debug("Failed to list ACP sessions from DB", exc_info=True)
|
||||
|
||||
results.sort(key=lambda item: _updated_at_sort_key(item.get("updated_at")), reverse=True)
|
||||
return results
|
||||
|
||||
def update_cwd(self, session_id: str, cwd: str) -> Optional[SessionState]:
|
||||
"""Update the working directory for a session and its tool overrides."""
|
||||
cwd = _translate_acp_cwd(cwd)
|
||||
state = self.get_session(session_id) # checks DB too
|
||||
if state is None:
|
||||
return None
|
||||
@@ -412,6 +250,8 @@ class SessionManager:
|
||||
if self._db_instance is not None:
|
||||
return self._db_instance
|
||||
try:
|
||||
import os
|
||||
from pathlib import Path
|
||||
from hermes_state import SessionDB
|
||||
hermes_home = get_hermes_home()
|
||||
self._db_instance = SessionDB(db_path=hermes_home / "state.db")
|
||||
@@ -466,10 +306,17 @@ class SessionManager:
|
||||
except Exception:
|
||||
logger.debug("Failed to update ACP session metadata", exc_info=True)
|
||||
|
||||
# Replace stored messages with current history atomically so a
|
||||
# mid-rewrite failure rolls back and the previously persisted
|
||||
# conversation is preserved (salvaged from #13675).
|
||||
db.replace_messages(state.session_id, state.history)
|
||||
# Replace stored messages with current history.
|
||||
db.clear_messages(state.session_id)
|
||||
for msg in state.history:
|
||||
db.append_message(
|
||||
session_id=state.session_id,
|
||||
role=msg.get("role", "user"),
|
||||
content=msg.get("content"),
|
||||
tool_name=msg.get("tool_name") or msg.get("name"),
|
||||
tool_calls=msg.get("tool_calls"),
|
||||
tool_call_id=msg.get("tool_call_id"),
|
||||
)
|
||||
except Exception:
|
||||
logger.warning("Failed to persist ACP session %s", state.session_id, exc_info=True)
|
||||
|
||||
@@ -587,18 +434,9 @@ class SessionManager:
|
||||
elif isinstance(model_cfg, str) and model_cfg.strip():
|
||||
default_model = model_cfg.strip()
|
||||
|
||||
configured_mcp_servers = [
|
||||
name
|
||||
for name, cfg in (config.get("mcp_servers") or {}).items()
|
||||
if not isinstance(cfg, dict) or cfg.get("enabled", True) is not False
|
||||
]
|
||||
|
||||
kwargs = {
|
||||
"platform": "acp",
|
||||
"enabled_toolsets": _expand_acp_enabled_toolsets(
|
||||
["hermes-acp"],
|
||||
mcp_server_names=configured_mcp_servers,
|
||||
),
|
||||
"enabled_toolsets": ["hermes-acp"],
|
||||
"quiet_mode": True,
|
||||
"session_id": session_id,
|
||||
"model": model or default_model,
|
||||
@@ -620,8 +458,4 @@ class SessionManager:
|
||||
logger.debug("ACP session falling back to default provider resolution", exc_info=True)
|
||||
|
||||
_register_task_cwd(session_id, cwd)
|
||||
agent = AIAgent(**kwargs)
|
||||
# ACP stdio transport requires stdout to remain protocol-only JSON-RPC.
|
||||
# Route any incidental human-readable agent output to stderr instead.
|
||||
agent._print_fn = _acp_stderr_print
|
||||
return agent
|
||||
return AIAgent(**kwargs)
|
||||
|
||||
acp_adapter/tools.py (1013 lines)
File diff suppressed because it is too large
@@ -1,326 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any, Optional
|
||||
|
||||
import httpx
|
||||
|
||||
from agent.anthropic_adapter import _is_oauth_token, resolve_anthropic_token
|
||||
from hermes_cli.auth import _read_codex_tokens, resolve_codex_runtime_credentials
|
||||
from hermes_cli.runtime_provider import resolve_runtime_provider
|
||||
|
||||
|
||||
def _utc_now() -> datetime:
|
||||
return datetime.now(timezone.utc)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class AccountUsageWindow:
|
||||
label: str
|
||||
used_percent: Optional[float] = None
|
||||
reset_at: Optional[datetime] = None
|
||||
detail: Optional[str] = None
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class AccountUsageSnapshot:
|
||||
provider: str
|
||||
source: str
|
||||
fetched_at: datetime
|
||||
title: str = "Account limits"
|
||||
plan: Optional[str] = None
|
||||
windows: tuple[AccountUsageWindow, ...] = ()
|
||||
details: tuple[str, ...] = ()
|
||||
unavailable_reason: Optional[str] = None
|
||||
|
||||
@property
|
||||
def available(self) -> bool:
|
||||
return bool(self.windows or self.details) and not self.unavailable_reason
|
||||
|
||||
|
||||
def _title_case_slug(value: Optional[str]) -> Optional[str]:
|
||||
cleaned = str(value or "").strip()
|
||||
if not cleaned:
|
||||
return None
|
||||
return cleaned.replace("_", " ").replace("-", " ").title()
|
||||
|
||||
|
||||
def _parse_dt(value: Any) -> Optional[datetime]:
|
||||
if value in (None, ""):
|
||||
return None
|
||||
if isinstance(value, (int, float)):
|
||||
return datetime.fromtimestamp(float(value), tz=timezone.utc)
|
||||
if isinstance(value, str):
|
||||
text = value.strip()
|
||||
if not text:
|
||||
return None
|
||||
if text.endswith("Z"):
|
||||
text = text[:-1] + "+00:00"
|
||||
try:
|
||||
dt = datetime.fromisoformat(text)
|
||||
return dt if dt.tzinfo else dt.replace(tzinfo=timezone.utc)
|
||||
except ValueError:
|
||||
return None
|
||||
return None
|
||||
|
||||
|
||||
def _format_reset(dt: Optional[datetime]) -> str:
|
||||
if not dt:
|
||||
return "unknown"
|
||||
local_dt = dt.astimezone()
|
||||
delta = dt - _utc_now()
|
||||
total_seconds = int(delta.total_seconds())
|
||||
if total_seconds <= 0:
|
||||
return f"now ({local_dt.strftime('%Y-%m-%d %H:%M %Z')})"
|
||||
hours, rem = divmod(total_seconds, 3600)
|
||||
minutes = rem // 60
|
||||
if hours >= 24:
|
||||
days, hours = divmod(hours, 24)
|
||||
rel = f"in {days}d {hours}h"
|
||||
elif hours > 0:
|
||||
rel = f"in {hours}h {minutes}m"
|
||||
else:
|
||||
rel = f"in {minutes}m"
|
||||
return f"{rel} ({local_dt.strftime('%Y-%m-%d %H:%M %Z')})"
|
||||
|
||||
|
||||
def render_account_usage_lines(snapshot: Optional[AccountUsageSnapshot], *, markdown: bool = False) -> list[str]:
|
||||
if not snapshot:
|
||||
return []
|
||||
header = f"📈 {'**' if markdown else ''}{snapshot.title}{'**' if markdown else ''}"
|
||||
lines = [header]
|
||||
if snapshot.plan:
|
||||
lines.append(f"Provider: {snapshot.provider} ({snapshot.plan})")
|
||||
else:
|
||||
lines.append(f"Provider: {snapshot.provider}")
|
||||
for window in snapshot.windows:
|
||||
if window.used_percent is None:
|
||||
base = f"{window.label}: unavailable"
|
||||
else:
|
||||
remaining = max(0, round(100 - float(window.used_percent)))
|
||||
used = max(0, round(float(window.used_percent)))
|
||||
base = f"{window.label}: {remaining}% remaining ({used}% used)"
|
||||
if window.reset_at:
|
||||
base += f" • resets {_format_reset(window.reset_at)}"
|
||||
elif window.detail:
|
||||
base += f" • {window.detail}"
|
||||
lines.append(base)
|
||||
for detail in snapshot.details:
|
||||
lines.append(detail)
|
||||
if snapshot.unavailable_reason:
|
||||
lines.append(f"Unavailable: {snapshot.unavailable_reason}")
|
||||
return lines
|
||||
|
||||
|
||||
def _resolve_codex_usage_url(base_url: str) -> str:
|
||||
normalized = (base_url or "").strip().rstrip("/")
|
||||
if not normalized:
|
||||
normalized = "https://chatgpt.com/backend-api/codex"
|
||||
if normalized.endswith("/codex"):
|
||||
normalized = normalized[: -len("/codex")]
|
||||
if "/backend-api" in normalized:
|
||||
return normalized + "/wham/usage"
|
||||
return normalized + "/api/codex/usage"
|
||||
|
||||
|
||||
def _fetch_codex_account_usage() -> Optional[AccountUsageSnapshot]:
|
||||
creds = resolve_codex_runtime_credentials(refresh_if_expiring=True)
|
||||
token_data = _read_codex_tokens()
|
||||
tokens = token_data.get("tokens") or {}
|
||||
account_id = str(tokens.get("account_id", "") or "").strip() or None
|
||||
headers = {
|
||||
"Authorization": f"Bearer {creds['api_key']}",
|
||||
"Accept": "application/json",
|
||||
"User-Agent": "codex-cli",
|
||||
}
|
||||
if account_id:
|
||||
headers["ChatGPT-Account-Id"] = account_id
|
||||
with httpx.Client(timeout=15.0) as client:
|
||||
response = client.get(_resolve_codex_usage_url(creds.get("base_url", "")), headers=headers)
|
||||
response.raise_for_status()
|
||||
payload = response.json() or {}
|
||||
rate_limit = payload.get("rate_limit") or {}
|
||||
windows: list[AccountUsageWindow] = []
|
||||
for key, label in (("primary_window", "Session"), ("secondary_window", "Weekly")):
|
||||
window = rate_limit.get(key) or {}
|
||||
used = window.get("used_percent")
|
||||
if used is None:
|
||||
continue
|
||||
windows.append(
|
||||
AccountUsageWindow(
|
||||
label=label,
|
||||
used_percent=float(used),
|
||||
reset_at=_parse_dt(window.get("reset_at")),
|
||||
)
|
||||
)
|
||||
details: list[str] = []
|
||||
credits = payload.get("credits") or {}
|
||||
if credits.get("has_credits"):
|
||||
balance = credits.get("balance")
|
||||
if isinstance(balance, (int, float)):
|
||||
details.append(f"Credits balance: ${float(balance):.2f}")
|
||||
elif credits.get("unlimited"):
|
||||
details.append("Credits balance: unlimited")
|
||||
return AccountUsageSnapshot(
|
||||
provider="openai-codex",
|
||||
source="usage_api",
|
||||
fetched_at=_utc_now(),
|
||||
plan=_title_case_slug(payload.get("plan_type")),
|
||||
windows=tuple(windows),
|
||||
details=tuple(details),
|
||||
)
|
||||
|
||||
|
||||
def _fetch_anthropic_account_usage() -> Optional[AccountUsageSnapshot]:
|
||||
token = (resolve_anthropic_token() or "").strip()
|
||||
if not token:
|
||||
return None
|
||||
if not _is_oauth_token(token):
|
||||
return AccountUsageSnapshot(
|
||||
provider="anthropic",
|
||||
source="oauth_usage_api",
|
||||
fetched_at=_utc_now(),
|
||||
unavailable_reason="Anthropic account limits are only available for OAuth-backed Claude accounts.",
|
||||
)
|
||||
headers = {
|
||||
"Authorization": f"Bearer {token}",
|
||||
"Accept": "application/json",
|
||||
"Content-Type": "application/json",
|
||||
"anthropic-beta": "oauth-2025-04-20",
|
||||
"User-Agent": "claude-code/2.1.0",
|
||||
}
|
||||
with httpx.Client(timeout=15.0) as client:
|
||||
response = client.get("https://api.anthropic.com/api/oauth/usage", headers=headers)
|
||||
response.raise_for_status()
|
||||
payload = response.json() or {}
|
||||
windows: list[AccountUsageWindow] = []
|
||||
mapping = (
|
||||
("five_hour", "Current session"),
|
||||
("seven_day", "Current week"),
|
||||
("seven_day_opus", "Opus week"),
|
||||
("seven_day_sonnet", "Sonnet week"),
|
||||
)
|
||||
for key, label in mapping:
|
||||
window = payload.get(key) or {}
|
||||
util = window.get("utilization")
|
||||
if util is None:
|
||||
continue
|
||||
used = float(util) * 100 if float(util) <= 1 else float(util)
|
||||
windows.append(
|
||||
AccountUsageWindow(
|
||||
label=label,
|
||||
used_percent=used,
|
||||
reset_at=_parse_dt(window.get("resets_at")),
|
||||
)
|
||||
)
|
||||
details: list[str] = []
|
||||
extra = payload.get("extra_usage") or {}
|
||||
if extra.get("is_enabled"):
|
||||
used_credits = extra.get("used_credits")
|
||||
monthly_limit = extra.get("monthly_limit")
|
||||
currency = extra.get("currency") or "USD"
|
||||
if isinstance(used_credits, (int, float)) and isinstance(monthly_limit, (int, float)):
|
||||
details.append(
|
||||
f"Extra usage: {used_credits:.2f} / {monthly_limit:.2f} {currency}"
|
||||
)
|
||||
return AccountUsageSnapshot(
|
||||
provider="anthropic",
|
||||
source="oauth_usage_api",
|
||||
fetched_at=_utc_now(),
|
||||
windows=tuple(windows),
|
||||
details=tuple(details),
|
||||
)
|
||||
|
||||
|
||||
def _fetch_openrouter_account_usage(base_url: Optional[str], api_key: Optional[str]) -> Optional[AccountUsageSnapshot]:
|
||||
runtime = resolve_runtime_provider(
|
||||
requested="openrouter",
|
||||
explicit_base_url=base_url,
|
||||
explicit_api_key=api_key,
|
||||
)
|
||||
token = str(runtime.get("api_key", "") or "").strip()
|
||||
if not token:
|
||||
return None
|
||||
normalized = str(runtime.get("base_url", "") or "").rstrip("/")
|
||||
credits_url = f"{normalized}/credits"
|
||||
key_url = f"{normalized}/key"
|
||||
headers = {
|
||||
"Authorization": f"Bearer {token}",
|
||||
"Accept": "application/json",
|
||||
}
|
||||
with httpx.Client(timeout=10.0) as client:
|
||||
credits_resp = client.get(credits_url, headers=headers)
|
||||
credits_resp.raise_for_status()
|
||||
credits = (credits_resp.json() or {}).get("data") or {}
|
||||
try:
|
||||
key_resp = client.get(key_url, headers=headers)
|
||||
key_resp.raise_for_status()
|
||||
key_data = (key_resp.json() or {}).get("data") or {}
|
||||
except Exception:
|
||||
key_data = {}
|
||||
total_credits = float(credits.get("total_credits") or 0.0)
|
||||
total_usage = float(credits.get("total_usage") or 0.0)
|
||||
details = [f"Credits balance: ${max(0.0, total_credits - total_usage):.2f}"]
|
||||
windows: list[AccountUsageWindow] = []
|
||||
limit = key_data.get("limit")
|
||||
limit_remaining = key_data.get("limit_remaining")
|
||||
limit_reset = str(key_data.get("limit_reset") or "").strip()
|
||||
usage = key_data.get("usage")
|
||||
if (
|
||||
isinstance(limit, (int, float))
|
||||
and float(limit) > 0
|
||||
and isinstance(limit_remaining, (int, float))
|
||||
and 0 <= float(limit_remaining) <= float(limit)
|
||||
):
|
||||
limit_value = float(limit)
|
||||
remaining_value = float(limit_remaining)
|
||||
used_percent = ((limit_value - remaining_value) / limit_value) * 100
|
||||
detail_parts = [f"${remaining_value:.2f} of ${limit_value:.2f} remaining"]
|
||||
if limit_reset:
|
||||
detail_parts.append(f"resets {limit_reset}")
|
||||
windows.append(
|
||||
AccountUsageWindow(
|
||||
label="API key quota",
|
||||
used_percent=used_percent,
|
||||
detail=" • ".join(detail_parts),
|
||||
)
|
||||
)
|
||||
if isinstance(usage, (int, float)):
|
||||
usage_parts = [f"API key usage: ${float(usage):.2f} total"]
|
||||
for value, label in (
|
||||
(key_data.get("usage_daily"), "today"),
|
||||
(key_data.get("usage_weekly"), "this week"),
|
||||
(key_data.get("usage_monthly"), "this month"),
|
||||
):
|
||||
if isinstance(value, (int, float)) and float(value) > 0:
|
||||
usage_parts.append(f"${float(value):.2f} {label}")
|
||||
details.append(" • ".join(usage_parts))
|
||||
return AccountUsageSnapshot(
|
||||
provider="openrouter",
|
||||
source="credits_api",
|
||||
fetched_at=_utc_now(),
|
||||
windows=tuple(windows),
|
||||
details=tuple(details),
|
||||
)
|
||||
|
||||
|
||||
def fetch_account_usage(
|
||||
provider: Optional[str],
|
||||
*,
|
||||
base_url: Optional[str] = None,
|
||||
api_key: Optional[str] = None,
|
||||
) -> Optional[AccountUsageSnapshot]:
|
||||
normalized = str(provider or "").strip().lower()
|
||||
if normalized in {"", "auto", "custom"}:
|
||||
return None
|
||||
try:
|
||||
if normalized == "openai-codex":
|
||||
return _fetch_codex_account_usage()
|
||||
if normalized == "anthropic":
|
||||
return _fetch_anthropic_account_usage()
|
||||
if normalized == "openrouter":
|
||||
return _fetch_openrouter_account_usage(base_url, api_key)
|
||||
except Exception:
|
||||
return None
|
||||
return None
|
||||
Three file diffs suppressed because they are too large
@@ -1,999 +0,0 @@
|
||||
"""Codex Responses API adapter.
|
||||
|
||||
Pure format-conversion and normalization logic for the OpenAI Responses API
|
||||
(used by OpenAI Codex, xAI, GitHub Models, and other Responses-compatible endpoints).
|
||||
|
||||
Extracted from run_agent.py to isolate Responses API-specific logic from the
|
||||
core agent loop. All functions are stateless — they operate on the data passed
|
||||
in and return transformed results.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import uuid
|
||||
from types import SimpleNamespace
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from agent.prompt_builder import DEFAULT_AGENT_IDENTITY
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Matches Codex/Harmony tool-call serialization that occasionally leaks into
|
||||
# assistant-message content when the model fails to emit a structured
|
||||
# ``function_call`` item. Accepts the common forms:
|
||||
#
|
||||
# to=functions.exec_command
|
||||
# assistant to=functions.exec_command
|
||||
# <|channel|>commentary to=functions.exec_command
|
||||
#
|
||||
# ``to=functions.<name>`` is the stable marker — the optional ``assistant`` or
|
||||
# Harmony channel prefix varies by degeneration mode. Case-insensitive to
|
||||
# cover lowercase/uppercase ``assistant`` variants.
|
||||
_TOOL_CALL_LEAK_PATTERN = re.compile(
|
||||
r"(?:^|[\s>|])to=functions\.[A-Za-z_][\w.]*",
|
||||
re.IGNORECASE,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Multimodal content helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _chat_content_to_responses_parts(content: Any, *, role: str = "user") -> List[Dict[str, Any]]:
|
||||
"""Convert chat-style multimodal content to Responses API input parts.
|
||||
|
||||
Input: ``[{"type":"text"|"image_url", ...}]`` (native OpenAI Chat format)
|
||||
Output: ``[{"type":"input_text"|"output_text"|"input_image", ...}]`` (Responses format)
|
||||
|
||||
The ``role`` parameter controls the text content type:
|
||||
- ``"user"`` (default) → ``"input_text"``
|
||||
- ``"assistant"`` → ``"output_text"``
|
||||
|
||||
The Responses API rejects ``input_text`` inside assistant messages and
|
||||
``output_text`` inside user messages, so callers MUST pass the correct
|
||||
role for the message being converted.
|
||||
|
||||
Returns an empty list when ``content`` is not a list or contains no
|
||||
recognized parts — callers fall back to the string path.
|
||||
"""
|
||||
text_type = "output_text" if role == "assistant" else "input_text"
|
||||
if not isinstance(content, list):
|
||||
return []
|
||||
converted: List[Dict[str, Any]] = []
|
||||
for part in content:
|
||||
if isinstance(part, str):
|
||||
if part:
|
||||
converted.append({"type": text_type, "text": part})
|
||||
continue
|
||||
if not isinstance(part, dict):
|
||||
continue
|
||||
ptype = str(part.get("type") or "").strip().lower()
|
||||
if ptype in {"text", "input_text", "output_text"}:
|
||||
text = part.get("text")
|
||||
if isinstance(text, str) and text:
|
||||
converted.append({"type": text_type, "text": text})
|
||||
continue
|
||||
if ptype in {"image_url", "input_image"}:
|
||||
image_ref = part.get("image_url")
|
||||
detail = part.get("detail")
|
||||
if isinstance(image_ref, dict):
|
||||
url = image_ref.get("url")
|
||||
detail = image_ref.get("detail", detail)
|
||||
else:
|
||||
url = image_ref
|
||||
if not isinstance(url, str) or not url:
|
||||
continue
|
||||
image_part: Dict[str, Any] = {"type": "input_image", "image_url": url}
|
||||
if isinstance(detail, str) and detail.strip():
|
||||
image_part["detail"] = detail.strip()
|
||||
converted.append(image_part)
|
||||
return converted
|
||||
|
||||
|
||||
def _summarize_user_message_for_log(content: Any) -> str:
|
||||
"""Return a short text summary of a user message for logging/trajectory.
|
||||
|
||||
Multimodal messages arrive as a list of ``{type:"text"|"image_url", ...}``
|
||||
parts from the API server. Logging, spinner previews, and trajectory
|
||||
files all want a plain string — this helper extracts the first chunk of
|
||||
text and notes any attached images. Returns an empty string for empty
|
||||
lists and ``str(content)`` for unexpected scalar types.
|
||||
"""
|
||||
if content is None:
|
||||
return ""
|
||||
if isinstance(content, str):
|
||||
return content
|
||||
if isinstance(content, list):
|
||||
text_bits: List[str] = []
|
||||
image_count = 0
|
||||
for part in content:
|
||||
if isinstance(part, str):
|
||||
if part:
|
||||
text_bits.append(part)
|
||||
continue
|
||||
if not isinstance(part, dict):
|
||||
continue
|
||||
ptype = str(part.get("type") or "").strip().lower()
|
||||
if ptype in {"text", "input_text", "output_text"}:
|
||||
text = part.get("text")
|
||||
if isinstance(text, str) and text:
|
||||
text_bits.append(text)
|
||||
elif ptype in {"image_url", "input_image"}:
|
||||
image_count += 1
|
||||
summary = " ".join(text_bits).strip()
|
||||
if image_count:
|
||||
note = f"[{image_count} image{'s' if image_count != 1 else ''}]"
|
||||
summary = f"{note} {summary}" if summary else note
|
||||
return summary
|
||||
try:
|
||||
return str(content)
|
||||
except Exception:
|
||||
return ""
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# ID helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _deterministic_call_id(fn_name: str, arguments: str, index: int = 0) -> str:
|
||||
"""Generate a deterministic call_id from tool call content.
|
||||
|
||||
Used as a fallback when the API doesn't provide a call_id.
|
||||
Deterministic IDs prevent cache invalidation — random UUIDs would
|
||||
make every API call's prefix unique, breaking OpenAI's prompt cache.
|
||||
"""
|
||||
seed = f"{fn_name}:{arguments}:{index}"
|
||||
digest = hashlib.sha256(seed.encode("utf-8", errors="replace")).hexdigest()[:12]
|
||||
return f"call_{digest}"
|
||||
|
||||
|
||||
def _split_responses_tool_id(raw_id: Any) -> tuple[Optional[str], Optional[str]]:
|
||||
"""Split a stored tool id into (call_id, response_item_id)."""
|
||||
if not isinstance(raw_id, str):
|
||||
return None, None
|
||||
value = raw_id.strip()
|
||||
if not value:
|
||||
return None, None
|
||||
if "|" in value:
|
||||
call_id, response_item_id = value.split("|", 1)
|
||||
call_id = call_id.strip() or None
|
||||
response_item_id = response_item_id.strip() or None
|
||||
return call_id, response_item_id
|
||||
if value.startswith("fc_"):
|
||||
return None, value
|
||||
return value, None
|
||||
|
||||
|
||||
def _derive_responses_function_call_id(
|
||||
call_id: str,
|
||||
response_item_id: Optional[str] = None,
|
||||
) -> str:
|
||||
"""Build a valid Responses `function_call.id` (must start with `fc_`)."""
|
||||
if isinstance(response_item_id, str):
|
||||
candidate = response_item_id.strip()
|
||||
if candidate.startswith("fc_"):
|
||||
return candidate
|
||||
|
||||
source = (call_id or "").strip()
|
||||
if source.startswith("fc_"):
|
||||
return source
|
||||
if source.startswith("call_") and len(source) > len("call_"):
|
||||
return f"fc_{source[len('call_'):]}"
|
||||
|
||||
sanitized = re.sub(r"[^A-Za-z0-9_-]", "", source)
|
||||
if sanitized.startswith("fc_"):
|
||||
return sanitized
|
||||
if sanitized.startswith("call_") and len(sanitized) > len("call_"):
|
||||
return f"fc_{sanitized[len('call_'):]}"
|
||||
if sanitized:
|
||||
return f"fc_{sanitized[:48]}"
|
||||
|
||||
seed = source or str(response_item_id or "") or uuid.uuid4().hex
|
||||
digest = hashlib.sha1(seed.encode("utf-8")).hexdigest()[:24]
|
||||
return f"fc_{digest}"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Schema conversion
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _responses_tools(tools: Optional[List[Dict[str, Any]]] = None) -> Optional[List[Dict[str, Any]]]:
|
||||
"""Convert chat-completions tool schemas to Responses function-tool schemas."""
|
||||
if not tools:
|
||||
return None
|
||||
|
||||
converted: List[Dict[str, Any]] = []
|
||||
for item in tools:
|
||||
fn = item.get("function", {}) if isinstance(item, dict) else {}
|
||||
name = fn.get("name")
|
||||
if not isinstance(name, str) or not name.strip():
|
||||
continue
|
||||
converted.append({
|
||||
"type": "function",
|
||||
"name": name,
|
||||
"description": fn.get("description", ""),
|
||||
"strict": False,
|
||||
"parameters": fn.get("parameters", {"type": "object", "properties": {}}),
|
||||
})
|
||||
return converted or None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Message format conversion
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_RESPONSE_MESSAGE_STATUSES = {"completed", "incomplete", "in_progress"}
|
||||
|
||||
|
||||
def _normalize_responses_message_status(value: Any, *, default: str = "completed") -> str:
|
||||
"""Normalize a Responses assistant message status for replay.
|
||||
|
||||
The API accepts completed/incomplete/in_progress on replayed assistant
|
||||
output messages. Preserve those exactly (modulo case/hyphen spelling) so
|
||||
incomplete Codex continuation turns don't get falsely marked completed.
|
||||
"""
|
||||
if isinstance(value, str):
|
||||
status = value.strip().lower().replace("-", "_").replace(" ", "_")
|
||||
if status in _RESPONSE_MESSAGE_STATUSES:
|
||||
return status
|
||||
return default
|
||||
|
||||
|
||||
def _chat_messages_to_responses_input(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
||||
"""Convert internal chat-style messages to Responses input items."""
|
||||
items: List[Dict[str, Any]] = []
|
||||
seen_item_ids: set = set()
|
||||
|
||||
for msg in messages:
|
||||
if not isinstance(msg, dict):
|
||||
continue
|
||||
role = msg.get("role")
|
||||
if role == "system":
|
||||
continue
|
||||
|
||||
if role in {"user", "assistant"}:
|
||||
content = msg.get("content", "")
|
||||
if isinstance(content, list):
|
||||
content_parts = _chat_content_to_responses_parts(content, role=role)
|
||||
text_type = "output_text" if role == "assistant" else "input_text"
|
||||
content_text = "".join(
|
||||
p.get("text", "") for p in content_parts if p.get("type") == text_type
|
||||
)
|
||||
else:
|
||||
content_parts = []
|
||||
content_text = str(content) if content is not None else ""
|
||||
|
||||
if role == "assistant":
|
||||
# Replay encrypted reasoning items from previous turns
|
||||
# so the API can maintain coherent reasoning chains.
|
||||
codex_reasoning = msg.get("codex_reasoning_items")
|
||||
has_codex_reasoning = False
|
||||
if isinstance(codex_reasoning, list):
|
||||
for ri in codex_reasoning:
|
||||
if isinstance(ri, dict) and ri.get("encrypted_content"):
|
||||
item_id = ri.get("id")
|
||||
if item_id and item_id in seen_item_ids:
|
||||
continue
|
||||
# Strip the "id" field — with store=False the
|
||||
# Responses API cannot look up items by ID and
|
||||
# returns 404. The encrypted_content blob is
|
||||
# self-contained for reasoning chain continuity.
|
||||
replay_item = {k: v for k, v in ri.items() if k != "id"}
|
||||
items.append(replay_item)
|
||||
if item_id:
|
||||
seen_item_ids.add(item_id)
|
||||
has_codex_reasoning = True
|
||||
|
||||
# Replay exact assistant message items (with id/phase) from
|
||||
# previous turns so the API can maintain prefix-cache hits.
|
||||
# OpenAI docs: "preserve and resend phase on all assistant
|
||||
# messages — dropping it can degrade performance."
|
||||
codex_message_items = msg.get("codex_message_items")
|
||||
replayed_message_items = 0
|
||||
if isinstance(codex_message_items, list):
|
||||
for raw_item in codex_message_items:
|
||||
if not isinstance(raw_item, dict):
|
||||
continue
|
||||
if raw_item.get("type") != "message" or raw_item.get("role") != "assistant":
|
||||
continue
|
||||
raw_content_parts = raw_item.get("content")
|
||||
if not isinstance(raw_content_parts, list):
|
||||
continue
|
||||
|
||||
normalized_content_parts = []
|
||||
for part in raw_content_parts:
|
||||
if not isinstance(part, dict):
|
||||
continue
|
||||
part_type = str(part.get("type") or "").strip()
|
||||
if part_type not in {"output_text", "text"}:
|
||||
continue
|
||||
text = part.get("text", "")
|
||||
if text is None:
|
||||
text = ""
|
||||
if not isinstance(text, str):
|
||||
text = str(text)
|
||||
normalized_content_parts.append({"type": "output_text", "text": text})
|
||||
|
||||
if not normalized_content_parts:
|
||||
continue
|
||||
|
||||
replay_item = {
|
||||
"type": "message",
|
||||
"role": "assistant",
|
||||
"status": _normalize_responses_message_status(raw_item.get("status")),
|
||||
"content": normalized_content_parts,
|
||||
}
|
||||
item_id = raw_item.get("id")
|
||||
if isinstance(item_id, str) and item_id.strip():
|
||||
replay_item["id"] = item_id.strip()
|
||||
phase = raw_item.get("phase")
|
||||
if isinstance(phase, str) and phase.strip():
|
||||
replay_item["phase"] = phase.strip()
|
||||
items.append(replay_item)
|
||||
replayed_message_items += 1
|
||||
|
||||
if replayed_message_items > 0:
|
||||
pass
|
||||
elif content_parts:
|
||||
items.append({"role": "assistant", "content": content_parts})
|
||||
elif content_text.strip():
|
||||
items.append({"role": "assistant", "content": content_text})
|
||||
elif has_codex_reasoning:
|
||||
# The Responses API requires a following item after each
|
||||
# reasoning item (otherwise: missing_following_item error).
|
||||
# When the assistant produced only reasoning with no visible
|
||||
# content, emit an empty assistant message as the required
|
||||
# following item.
|
||||
items.append({"role": "assistant", "content": ""})
|
||||
|
||||
tool_calls = msg.get("tool_calls")
|
||||
if isinstance(tool_calls, list):
|
||||
for tc in tool_calls:
|
||||
if not isinstance(tc, dict):
|
||||
continue
|
||||
fn = tc.get("function", {})
|
||||
fn_name = fn.get("name")
|
||||
if not isinstance(fn_name, str) or not fn_name.strip():
|
||||
continue
|
||||
|
||||
embedded_call_id, embedded_response_item_id = _split_responses_tool_id(
|
||||
tc.get("id")
|
||||
)
|
||||
call_id = tc.get("call_id")
|
||||
if not isinstance(call_id, str) or not call_id.strip():
|
||||
call_id = embedded_call_id
|
||||
if not isinstance(call_id, str) or not call_id.strip():
|
||||
if (
|
||||
isinstance(embedded_response_item_id, str)
|
||||
and embedded_response_item_id.startswith("fc_")
|
||||
and len(embedded_response_item_id) > len("fc_")
|
||||
):
|
||||
call_id = f"call_{embedded_response_item_id[len('fc_'):]}"
|
||||
else:
|
||||
_raw_args = str(fn.get("arguments", "{}"))
|
||||
call_id = _deterministic_call_id(fn_name, _raw_args, len(items))
|
||||
call_id = call_id.strip()
|
||||
|
||||
arguments = fn.get("arguments", "{}")
|
||||
if isinstance(arguments, dict):
|
||||
arguments = json.dumps(arguments, ensure_ascii=False)
|
||||
elif not isinstance(arguments, str):
|
||||
arguments = str(arguments)
|
||||
arguments = arguments.strip() or "{}"
|
||||
|
||||
items.append({
|
||||
"type": "function_call",
|
||||
"call_id": call_id,
|
||||
"name": fn_name,
|
||||
"arguments": arguments,
|
||||
})
|
||||
continue
|
||||
|
||||
# Non-assistant (user) role: emit multimodal parts when present,
|
||||
# otherwise fall back to the text payload.
|
||||
if content_parts:
|
||||
items.append({"role": role, "content": content_parts})
|
||||
else:
|
||||
items.append({"role": role, "content": content_text})
|
||||
continue
|
||||
|
||||
if role == "tool":
|
||||
raw_tool_call_id = msg.get("tool_call_id")
|
||||
call_id, _ = _split_responses_tool_id(raw_tool_call_id)
|
||||
if not isinstance(call_id, str) or not call_id.strip():
|
||||
if isinstance(raw_tool_call_id, str) and raw_tool_call_id.strip():
|
||||
call_id = raw_tool_call_id.strip()
|
||||
if not isinstance(call_id, str) or not call_id.strip():
|
||||
continue
|
||||
items.append({
|
||||
"type": "function_call_output",
|
||||
"call_id": call_id,
|
||||
"output": str(msg.get("content", "") or ""),
|
||||
})
|
||||
|
||||
return items
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Input preflight / validation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _preflight_codex_input_items(raw_items: Any) -> List[Dict[str, Any]]:
|
||||
if not isinstance(raw_items, list):
|
||||
raise ValueError("Codex Responses input must be a list of input items.")
|
||||
|
||||
normalized: List[Dict[str, Any]] = []
|
||||
seen_ids: set = set()
|
||||
for idx, item in enumerate(raw_items):
|
||||
if not isinstance(item, dict):
|
||||
raise ValueError(f"Codex Responses input[{idx}] must be an object.")
|
||||
|
||||
item_type = item.get("type")
|
||||
if item_type == "function_call":
|
||||
call_id = item.get("call_id")
|
||||
name = item.get("name")
|
||||
if not isinstance(call_id, str) or not call_id.strip():
|
||||
raise ValueError(f"Codex Responses input[{idx}] function_call is missing call_id.")
|
||||
if not isinstance(name, str) or not name.strip():
|
||||
raise ValueError(f"Codex Responses input[{idx}] function_call is missing name.")
|
||||
|
||||
arguments = item.get("arguments", "{}")
|
||||
if isinstance(arguments, dict):
|
||||
arguments = json.dumps(arguments, ensure_ascii=False)
|
||||
elif not isinstance(arguments, str):
|
||||
arguments = str(arguments)
|
||||
arguments = arguments.strip() or "{}"
|
||||
|
||||
normalized.append(
|
||||
{
|
||||
"type": "function_call",
|
||||
"call_id": call_id.strip(),
|
||||
"name": name.strip(),
|
||||
"arguments": arguments,
|
||||
}
|
||||
)
|
||||
continue
|
||||
|
||||
if item_type == "function_call_output":
|
||||
call_id = item.get("call_id")
|
||||
if not isinstance(call_id, str) or not call_id.strip():
|
||||
raise ValueError(f"Codex Responses input[{idx}] function_call_output is missing call_id.")
|
||||
output = item.get("output", "")
|
||||
if output is None:
|
||||
output = ""
|
||||
if not isinstance(output, str):
|
||||
output = str(output)
|
||||
|
||||
normalized.append(
|
||||
{
|
||||
"type": "function_call_output",
|
||||
"call_id": call_id.strip(),
|
||||
"output": output,
|
||||
}
|
||||
)
|
||||
continue
|
||||
|
||||
if item_type == "reasoning":
|
||||
encrypted = item.get("encrypted_content")
|
||||
if isinstance(encrypted, str) and encrypted:
|
||||
item_id = item.get("id")
|
||||
if isinstance(item_id, str) and item_id:
|
||||
if item_id in seen_ids:
|
||||
continue
|
||||
seen_ids.add(item_id)
|
||||
reasoning_item = {"type": "reasoning", "encrypted_content": encrypted}
|
||||
# Do NOT include the "id" in the outgoing item — with
|
||||
# store=False (our default) the API tries to resolve the
|
||||
# id server-side and returns 404. The id is still used
|
||||
# above for local deduplication via seen_ids.
|
||||
summary = item.get("summary")
|
||||
if isinstance(summary, list):
|
||||
reasoning_item["summary"] = summary
|
||||
else:
|
||||
reasoning_item["summary"] = []
|
||||
normalized.append(reasoning_item)
|
||||
continue
|
||||
|
||||
if item_type == "message":
|
||||
role = item.get("role")
|
||||
if role != "assistant":
|
||||
raise ValueError(f"Codex Responses input[{idx}] message items must have role='assistant'.")
|
||||
content = item.get("content")
|
||||
if not isinstance(content, list):
|
||||
raise ValueError(f"Codex Responses input[{idx}] message item must have content list.")
|
||||
normalized_content = []
|
||||
for part_idx, part in enumerate(content):
|
||||
if not isinstance(part, dict):
|
||||
raise ValueError(
|
||||
f"Codex Responses input[{idx}] message content[{part_idx}] must be an object."
|
||||
)
|
||||
part_type = part.get("type")
|
||||
if part_type not in {"output_text", "text"}:
|
||||
raise ValueError(
|
||||
f"Codex Responses input[{idx}] message content[{part_idx}] has unsupported type {part_type!r}."
|
||||
)
|
||||
text = part.get("text", "")
|
||||
if text is None:
|
||||
text = ""
|
||||
if not isinstance(text, str):
|
||||
text = str(text)
|
||||
normalized_content.append({"type": "output_text", "text": text})
|
||||
if not normalized_content:
|
||||
raise ValueError(f"Codex Responses input[{idx}] message item must contain at least one text part.")
|
||||
normalized_item: Dict[str, Any] = {
|
||||
"type": "message",
|
||||
"role": "assistant",
|
||||
"status": _normalize_responses_message_status(item.get("status")),
|
||||
"content": normalized_content,
|
||||
}
|
||||
item_id = item.get("id")
|
||||
if isinstance(item_id, str) and item_id.strip():
|
||||
normalized_item["id"] = item_id.strip()
|
||||
phase = item.get("phase")
|
||||
if isinstance(phase, str) and phase.strip():
|
||||
normalized_item["phase"] = phase.strip()
|
||||
normalized.append(normalized_item)
|
||||
continue
|
||||
|
||||
role = item.get("role")
|
||||
if role in {"user", "assistant"}:
|
||||
content = item.get("content", "")
|
||||
if content is None:
|
||||
content = ""
|
||||
if isinstance(content, list):
|
||||
# Multimodal content from ``_chat_messages_to_responses_input``
|
||||
# is already in Responses format (``input_text`` / ``output_text``
|
||||
# / ``input_image``). Validate each part and pass through.
|
||||
# Use the correct text type for the role — ``output_text`` for
|
||||
# assistant messages, ``input_text`` for user messages.
|
||||
text_type = "output_text" if role == "assistant" else "input_text"
|
||||
validated: List[Dict[str, Any]] = []
|
||||
for part_idx, part in enumerate(content):
|
||||
if isinstance(part, str):
|
||||
if part:
|
||||
validated.append({"type": text_type, "text": part})
|
||||
continue
|
||||
if not isinstance(part, dict):
|
||||
raise ValueError(
|
||||
f"Codex Responses input[{idx}].content[{part_idx}] must be an object or string."
|
||||
)
|
||||
ptype = str(part.get("type") or "").strip().lower()
|
||||
if ptype in {"input_text", "text", "output_text"}:
|
||||
text = part.get("text", "")
|
||||
if not isinstance(text, str):
|
||||
text = str(text or "")
|
||||
validated.append({"type": text_type, "text": text})
|
||||
elif ptype in {"input_image", "image_url"}:
|
||||
image_ref = part.get("image_url", "")
|
||||
detail = part.get("detail")
|
||||
if isinstance(image_ref, dict):
|
||||
url = image_ref.get("url", "")
|
||||
detail = image_ref.get("detail", detail)
|
||||
else:
|
||||
url = image_ref
|
||||
if not isinstance(url, str):
|
||||
url = str(url or "")
|
||||
image_part: Dict[str, Any] = {"type": "input_image", "image_url": url}
|
||||
if isinstance(detail, str) and detail.strip():
|
||||
image_part["detail"] = detail.strip()
|
||||
validated.append(image_part)
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Codex Responses input[{idx}].content[{part_idx}] has unsupported type {part.get('type')!r}."
|
||||
)
|
||||
normalized.append({"role": role, "content": validated})
|
||||
continue
|
||||
if not isinstance(content, str):
|
||||
content = str(content)
|
||||
|
||||
normalized.append({"role": role, "content": content})
|
||||
continue
|
||||
|
||||
raise ValueError(
|
||||
f"Codex Responses input[{idx}] has unsupported item shape (type={item_type!r}, role={role!r})."
|
||||
)
|
||||
|
||||
return normalized


def _preflight_codex_api_kwargs(
    api_kwargs: Any,
    *,
    allow_stream: bool = False,
) -> Dict[str, Any]:
    if not isinstance(api_kwargs, dict):
        raise ValueError("Codex Responses request must be a dict.")

    required = {"model", "instructions", "input"}
    missing = [key for key in required if key not in api_kwargs]
    if missing:
        raise ValueError(f"Codex Responses request missing required field(s): {', '.join(sorted(missing))}.")

    model = api_kwargs.get("model")
    if not isinstance(model, str) or not model.strip():
        raise ValueError("Codex Responses request 'model' must be a non-empty string.")
    model = model.strip()

    instructions = api_kwargs.get("instructions")
    if instructions is None:
        instructions = ""
    if not isinstance(instructions, str):
        instructions = str(instructions)
    instructions = instructions.strip() or DEFAULT_AGENT_IDENTITY

    normalized_input = _preflight_codex_input_items(api_kwargs.get("input"))

    tools = api_kwargs.get("tools")
    normalized_tools = None
    if tools is not None:
        if not isinstance(tools, list):
            raise ValueError("Codex Responses request 'tools' must be a list when provided.")
        normalized_tools = []
        for idx, tool in enumerate(tools):
            if not isinstance(tool, dict):
                raise ValueError(f"Codex Responses tools[{idx}] must be an object.")
            if tool.get("type") != "function":
                raise ValueError(f"Codex Responses tools[{idx}] has unsupported type {tool.get('type')!r}.")

            name = tool.get("name")
            parameters = tool.get("parameters")
            if not isinstance(name, str) or not name.strip():
                raise ValueError(f"Codex Responses tools[{idx}] is missing a valid name.")
            if not isinstance(parameters, dict):
                raise ValueError(f"Codex Responses tools[{idx}] is missing valid parameters.")

            description = tool.get("description", "")
            if description is None:
                description = ""
            if not isinstance(description, str):
                description = str(description)

            strict = tool.get("strict", False)
            if not isinstance(strict, bool):
                strict = bool(strict)

            normalized_tools.append(
                {
                    "type": "function",
                    "name": name.strip(),
                    "description": description,
                    "strict": strict,
                    "parameters": parameters,
                }
            )

    store = api_kwargs.get("store", False)
    if store is not False:
        raise ValueError("Codex Responses contract requires 'store' to be false.")

    allowed_keys = {
        "model", "instructions", "input", "tools", "store",
        "reasoning", "include", "max_output_tokens", "temperature",
        "tool_choice", "parallel_tool_calls", "prompt_cache_key", "service_tier",
        "extra_headers",
    }
    normalized: Dict[str, Any] = {
        "model": model,
        "instructions": instructions,
        "input": normalized_input,
        "store": False,
    }
    if normalized_tools is not None:
        normalized["tools"] = normalized_tools

    # Pass through reasoning config
    reasoning = api_kwargs.get("reasoning")
    if isinstance(reasoning, dict):
        normalized["reasoning"] = reasoning
    include = api_kwargs.get("include")
    if isinstance(include, list):
        normalized["include"] = include
    service_tier = api_kwargs.get("service_tier")
    if isinstance(service_tier, str) and service_tier.strip():
        normalized["service_tier"] = service_tier.strip()

    # Pass through max_output_tokens and temperature
    max_output_tokens = api_kwargs.get("max_output_tokens")
    if isinstance(max_output_tokens, (int, float)) and max_output_tokens > 0:
        normalized["max_output_tokens"] = int(max_output_tokens)
    temperature = api_kwargs.get("temperature")
    if isinstance(temperature, (int, float)):
        normalized["temperature"] = float(temperature)

    # Pass through tool_choice, parallel_tool_calls, prompt_cache_key
    for passthrough_key in ("tool_choice", "parallel_tool_calls", "prompt_cache_key"):
        val = api_kwargs.get(passthrough_key)
        if val is not None:
            normalized[passthrough_key] = val

    extra_headers = api_kwargs.get("extra_headers")
    if extra_headers is not None:
        if not isinstance(extra_headers, dict):
            raise ValueError("Codex Responses request 'extra_headers' must be an object.")
        normalized_headers: Dict[str, str] = {}
        for key, value in extra_headers.items():
            if not isinstance(key, str) or not key.strip():
                raise ValueError("Codex Responses request 'extra_headers' keys must be non-empty strings.")
            if value is None:
                continue
            normalized_headers[key.strip()] = str(value)
        if normalized_headers:
            normalized["extra_headers"] = normalized_headers

    if allow_stream:
        stream = api_kwargs.get("stream")
        if stream is not None and stream is not True:
            raise ValueError("Codex Responses 'stream' must be true when set.")
        if stream is True:
            normalized["stream"] = True
            allowed_keys.add("stream")
    elif "stream" in api_kwargs:
        raise ValueError("Codex Responses stream flag is only allowed in fallback streaming requests.")

    unexpected = sorted(key for key in api_kwargs if key not in allowed_keys)
    if unexpected:
        raise ValueError(
            f"Codex Responses request has unsupported field(s): {', '.join(unexpected)}."
        )

    return normalized
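
# Illustrative sketch of the preflight contract (not part of the diff): the
# field names mirror the validator above, but the model id and tool schema
# are invented examples, not values taken from this repository.
#
#   request = {
#       "model": "gpt-5.1-codex",          # assumed example id
#       "instructions": "You are Hermes.",
#       "input": [{"role": "user", "content": "List the repo's top-level packages."}],
#       "store": False,
#       "tools": [{
#           "type": "function",
#           "name": "run_shell",
#           "description": "Run a shell command.",
#           "strict": False,
#           "parameters": {"type": "object", "properties": {"cmd": {"type": "string"}}},
#       }],
#   }
#   safe_kwargs = _preflight_codex_api_kwargs(request)                       # non-streaming path
#   stream_kwargs = _preflight_codex_api_kwargs(
#       {**request, "stream": True}, allow_stream=True,                      # fallback streaming path
#   )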


# ---------------------------------------------------------------------------
# Response extraction helpers
# ---------------------------------------------------------------------------

def _extract_responses_message_text(item: Any) -> str:
    """Extract assistant text from a Responses message output item."""
    content = getattr(item, "content", None)
    if not isinstance(content, list):
        return ""

    chunks: List[str] = []
    for part in content:
        ptype = getattr(part, "type", None)
        if ptype not in {"output_text", "text"}:
            continue
        text = getattr(part, "text", None)
        if isinstance(text, str) and text:
            chunks.append(text)
    return "".join(chunks).strip()


def _extract_responses_reasoning_text(item: Any) -> str:
    """Extract a compact reasoning text from a Responses reasoning item."""
    summary = getattr(item, "summary", None)
    if isinstance(summary, list):
        chunks: List[str] = []
        for part in summary:
            text = getattr(part, "text", None)
            if isinstance(text, str) and text:
                chunks.append(text)
        if chunks:
            return "\n".join(chunks).strip()
    text = getattr(item, "text", None)
    if isinstance(text, str) and text:
        return text.strip()
    return ""


# ---------------------------------------------------------------------------
# Full response normalization
# ---------------------------------------------------------------------------

def _normalize_codex_response(response: Any) -> tuple[Any, str]:
    """Normalize a Responses API object to an assistant_message-like object."""
    output = getattr(response, "output", None)
    if not isinstance(output, list) or not output:
        # The Codex backend can return empty output when the answer was
        # delivered entirely via stream events. Check output_text as a
        # last-resort fallback before raising.
        out_text = getattr(response, "output_text", None)
        if isinstance(out_text, str) and out_text.strip():
            logger.debug(
                "Codex response has empty output but output_text is present (%d chars); "
                "synthesizing output item.", len(out_text.strip()),
            )
            output = [SimpleNamespace(
                type="message", role="assistant", status="completed",
                content=[SimpleNamespace(type="output_text", text=out_text.strip())],
            )]
            response.output = output
        else:
            raise RuntimeError("Responses API returned no output items")

    response_status = getattr(response, "status", None)
    if isinstance(response_status, str):
        response_status = response_status.strip().lower()
    else:
        response_status = None

    if response_status in {"failed", "cancelled"}:
        error_obj = getattr(response, "error", None)
        if isinstance(error_obj, dict):
            error_msg = error_obj.get("message") or str(error_obj)
        else:
            error_msg = str(error_obj) if error_obj else f"Responses API returned status '{response_status}'"
        raise RuntimeError(error_msg)

    content_parts: List[str] = []
    reasoning_parts: List[str] = []
    reasoning_items_raw: List[Dict[str, Any]] = []
    message_items_raw: List[Dict[str, Any]] = []
    tool_calls: List[Any] = []
    has_incomplete_items = response_status in {"queued", "in_progress", "incomplete"}
    saw_commentary_phase = False
    saw_final_answer_phase = False

    for item in output:
        item_type = getattr(item, "type", None)
        item_status = getattr(item, "status", None)
        if isinstance(item_status, str):
            item_status = item_status.strip().lower()
        else:
            item_status = None

        if item_status in {"queued", "in_progress", "incomplete"}:
            has_incomplete_items = True

        if item_type == "message":
            item_phase = getattr(item, "phase", None)
            normalized_phase = None
            if isinstance(item_phase, str):
                normalized_phase = item_phase.strip().lower()
                if normalized_phase in {"commentary", "analysis"}:
                    saw_commentary_phase = True
                elif normalized_phase in {"final_answer", "final"}:
                    saw_final_answer_phase = True
            message_text = _extract_responses_message_text(item)
            if message_text:
                content_parts.append(message_text)
                raw_message_item: Dict[str, Any] = {
                    "type": "message",
                    "role": "assistant",
                    "status": _normalize_responses_message_status(item_status),
                    "content": [{"type": "output_text", "text": message_text}],
                }
                item_id = getattr(item, "id", None)
                if isinstance(item_id, str) and item_id:
                    raw_message_item["id"] = item_id
                if normalized_phase:
                    raw_message_item["phase"] = normalized_phase
                message_items_raw.append(raw_message_item)
        elif item_type == "reasoning":
            reasoning_text = _extract_responses_reasoning_text(item)
            if reasoning_text:
                reasoning_parts.append(reasoning_text)
            # Capture the full reasoning item for multi-turn continuity.
            # encrypted_content is an opaque blob the API needs back on
            # subsequent turns to maintain coherent reasoning chains.
            encrypted = getattr(item, "encrypted_content", None)
            if isinstance(encrypted, str) and encrypted:
                raw_item = {"type": "reasoning", "encrypted_content": encrypted}
                item_id = getattr(item, "id", None)
                if isinstance(item_id, str) and item_id:
                    raw_item["id"] = item_id
                # Capture summary — required by the API when replaying reasoning items
                summary = getattr(item, "summary", None)
                if isinstance(summary, list):
                    raw_summary = []
                    for part in summary:
                        text = getattr(part, "text", None)
                        if isinstance(text, str):
                            raw_summary.append({"type": "summary_text", "text": text})
                    raw_item["summary"] = raw_summary
                reasoning_items_raw.append(raw_item)
        elif item_type == "function_call":
            if item_status in {"queued", "in_progress", "incomplete"}:
                continue
            fn_name = getattr(item, "name", "") or ""
            arguments = getattr(item, "arguments", "{}")
            if not isinstance(arguments, str):
                arguments = json.dumps(arguments, ensure_ascii=False)
            raw_call_id = getattr(item, "call_id", None)
            raw_item_id = getattr(item, "id", None)
            embedded_call_id, _ = _split_responses_tool_id(raw_item_id)
            call_id = raw_call_id if isinstance(raw_call_id, str) and raw_call_id.strip() else embedded_call_id
            if not isinstance(call_id, str) or not call_id.strip():
                call_id = _deterministic_call_id(fn_name, arguments, len(tool_calls))
            call_id = call_id.strip()
            response_item_id = raw_item_id if isinstance(raw_item_id, str) else None
            response_item_id = _derive_responses_function_call_id(call_id, response_item_id)
            tool_calls.append(SimpleNamespace(
                id=call_id,
                call_id=call_id,
                response_item_id=response_item_id,
                type="function",
                function=SimpleNamespace(name=fn_name, arguments=arguments),
            ))
        elif item_type == "custom_tool_call":
            fn_name = getattr(item, "name", "") or ""
            arguments = getattr(item, "input", "{}")
            if not isinstance(arguments, str):
                arguments = json.dumps(arguments, ensure_ascii=False)
            raw_call_id = getattr(item, "call_id", None)
            raw_item_id = getattr(item, "id", None)
            embedded_call_id, _ = _split_responses_tool_id(raw_item_id)
            call_id = raw_call_id if isinstance(raw_call_id, str) and raw_call_id.strip() else embedded_call_id
            if not isinstance(call_id, str) or not call_id.strip():
                call_id = _deterministic_call_id(fn_name, arguments, len(tool_calls))
            call_id = call_id.strip()
            response_item_id = raw_item_id if isinstance(raw_item_id, str) else None
            response_item_id = _derive_responses_function_call_id(call_id, response_item_id)
            tool_calls.append(SimpleNamespace(
                id=call_id,
                call_id=call_id,
                response_item_id=response_item_id,
                type="function",
                function=SimpleNamespace(name=fn_name, arguments=arguments),
            ))

    final_text = "\n".join([p for p in content_parts if p]).strip()
    if not final_text and hasattr(response, "output_text"):
        out_text = getattr(response, "output_text", "")
        if isinstance(out_text, str):
            final_text = out_text.strip()

    # ── Tool-call leak recovery ──────────────────────────────────
    # gpt-5.x on the Codex Responses API sometimes degenerates and emits
    # what should be a structured `function_call` item as plain assistant
    # text using the Harmony/Codex serialization (``to=functions.foo
    # {json}`` or ``assistant to=functions.foo {json}``). The model
    # intended to call a tool, but the intent never made it into
    # ``response.output`` as a ``function_call`` item, so ``tool_calls``
    # is empty here. If we pass this through, the parent sees a
    # confident-looking summary with no audit trail (empty ``tool_trace``)
    # and no tools actually ran — the Taiwan-embassy-email incident.
    #
    # Detection: leaked tokens always contain ``to=functions.<name>`` and
    # the assistant message has no real tool calls. Treat it as incomplete
    # so the existing Codex-incomplete continuation path (3 retries,
    # handled in run_agent.py) gets a chance to re-elicit a proper
    # ``function_call`` item. The existing loop already handles message
    # append, dedup, and retry budget.
    leaked_tool_call_text = False
    if final_text and not tool_calls and _TOOL_CALL_LEAK_PATTERN.search(final_text):
        leaked_tool_call_text = True
        logger.warning(
            "Codex response contains leaked tool-call text in assistant content "
            "(no structured function_call items). Treating as incomplete so the "
            "continuation path can re-elicit a proper tool call. Leaked snippet: %r",
            final_text[:300],
        )
        # Clear the text so downstream code doesn't surface the garbage as
        # a summary. The encrypted reasoning items (if any) are preserved
        # so the model keeps its chain-of-thought on the retry.
        final_text = ""

    assistant_message = SimpleNamespace(
        content=final_text,
        tool_calls=tool_calls,
        reasoning="\n\n".join(reasoning_parts).strip() if reasoning_parts else None,
        reasoning_content=None,
        reasoning_details=None,
        codex_reasoning_items=reasoning_items_raw or None,
        codex_message_items=message_items_raw or None,
    )

    if tool_calls:
        finish_reason = "tool_calls"
    elif leaked_tool_call_text:
        finish_reason = "incomplete"
    elif has_incomplete_items or (saw_commentary_phase and not saw_final_answer_phase):
        finish_reason = "incomplete"
    elif reasoning_items_raw and not final_text:
        # Response contains only reasoning (encrypted thinking state) with
        # no visible content or tool calls. The model is still thinking and
        # needs another turn to produce the actual answer. Marking this as
        # "stop" would send it into the empty-content retry loop which burns
        # 3 retries then fails — treat it as incomplete instead so the Codex
        # continuation path handles it correctly.
        finish_reason = "incomplete"
    else:
        finish_reason = "stop"
    return assistant_message, finish_reason
File diff suppressed because it is too large
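Illustrative sketch (not part of the diff): how a degenerate "leaked tool call" response is normalized by the function above. `_TOOL_CALL_LEAK_PATTERN` is assumed to match the ``to=functions.<name>`` serialization described in its comments.

    from types import SimpleNamespace

    leaked = SimpleNamespace(
        status="completed",
        output=[SimpleNamespace(
            type="message", role="assistant", status="completed",
            content=[SimpleNamespace(type="output_text",
                                     text='to=functions.send_email {"to": "user@example.com"}')],
        )],
    )
    message, finish_reason = _normalize_codex_response(leaked)
    # finish_reason == "incomplete" and message.content == "" — the leaked text is
    # suppressed and the continuation path re-elicits a structured function_call.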
@@ -1,206 +0,0 @@
"""Abstract base class for pluggable context engines.

A context engine controls how conversation context is managed when
approaching the model's token limit. The built-in ContextCompressor
is the default implementation. Third-party engines (e.g. LCM) can
replace it via the plugin system or by being placed in the
``plugins/context_engine/<name>/`` directory.

Selection is config-driven: ``context.engine`` in config.yaml.
Default is ``"compressor"`` (the built-in). Only one engine is active.

The engine is responsible for:
  - Deciding when compaction should fire
  - Performing compaction (summarization, DAG construction, etc.)
  - Optionally exposing tools the agent can call (e.g. lcm_grep)
  - Tracking token usage from API responses

Lifecycle:
  1. Engine is instantiated and registered (plugin register() or default)
  2. on_session_start() called when a conversation begins
  3. update_from_response() called after each API response with usage data
  4. should_compress() checked after each turn
  5. compress() called when should_compress() returns True
  6. on_session_end() called at real session boundaries (CLI exit, /reset,
     gateway session expiry) — NOT per-turn
"""

from abc import ABC, abstractmethod
from typing import Any, Dict, List


class ContextEngine(ABC):
    """Base class all context engines must implement."""

    # -- Identity ----------------------------------------------------------

    @property
    @abstractmethod
    def name(self) -> str:
        """Short identifier (e.g. 'compressor', 'lcm')."""

    # -- Token state (read by run_agent.py for display/logging) ------------
    #
    # Engines MUST maintain these. run_agent.py reads them directly.

    last_prompt_tokens: int = 0
    last_completion_tokens: int = 0
    last_total_tokens: int = 0
    threshold_tokens: int = 0
    context_length: int = 0
    compression_count: int = 0

    # -- Compaction parameters (read by run_agent.py for preflight) --------
    #
    # These control the preflight compression check. Subclasses may
    # override via __init__ or property; defaults are sensible for most
    # engines.

    threshold_percent: float = 0.75
    protect_first_n: int = 3
    protect_last_n: int = 6

    # -- Core interface ----------------------------------------------------

    @abstractmethod
    def update_from_response(self, usage: Dict[str, Any]) -> None:
        """Update tracked token usage from an API response.

        Called after every LLM call with the usage dict from the response.
        """

    @abstractmethod
    def should_compress(self, prompt_tokens: int = None) -> bool:
        """Return True if compaction should fire this turn."""

    @abstractmethod
    def compress(
        self,
        messages: List[Dict[str, Any]],
        current_tokens: int = None,
        focus_topic: str = None,
    ) -> List[Dict[str, Any]]:
        """Compact the message list and return the new message list.

        This is the main entry point. The engine receives the full message
        list and returns a (possibly shorter) list that fits within the
        context budget. The implementation is free to summarize, build a
        DAG, or do anything else — as long as the returned list is a valid
        OpenAI-format message sequence.

        Args:
            focus_topic: Optional topic string from manual ``/compress <focus>``.
                Engines that support guided compression should prioritise
                preserving information related to this topic. Engines that
                don't support it may simply ignore this argument.
        """

    # -- Optional: pre-flight check ----------------------------------------

    def should_compress_preflight(self, messages: List[Dict[str, Any]]) -> bool:
        """Quick rough check before the API call (no real token count yet).

        Default returns False (skip pre-flight). Override if your engine
        can do a cheap estimate.
        """
        return False

    # -- Optional: manual /compress preflight ------------------------------

    def has_content_to_compress(self, messages: List[Dict[str, Any]]) -> bool:
        """Quick check: is there anything in ``messages`` that can be compacted?

        Used by the gateway ``/compress`` command as a preflight guard —
        returning False lets the gateway report "nothing to compress yet"
        without making an LLM call.

        Default returns True (always attempt). Engines with a cheap way
        to introspect their own head/tail boundaries should override this
        to return False when the transcript is still entirely protected.
        """
        return True

    # -- Optional: session lifecycle ---------------------------------------

    def on_session_start(self, session_id: str, **kwargs) -> None:
        """Called when a new conversation session begins.

        Use this to load persisted state (DAG, store) for the session.
        kwargs may include hermes_home, platform, model, etc.
        """

    def on_session_end(self, session_id: str, messages: List[Dict[str, Any]]) -> None:
        """Called at real session boundaries (CLI exit, /reset, gateway expiry).

        Use this to flush state, close DB connections, etc.
        NOT called per-turn — only when the session truly ends.
        """

    def on_session_reset(self) -> None:
        """Called on /new or /reset. Reset per-session state.

        Default resets compression_count and token tracking.
        """
        self.last_prompt_tokens = 0
        self.last_completion_tokens = 0
        self.last_total_tokens = 0
        self.compression_count = 0

    # -- Optional: tools ---------------------------------------------------

    def get_tool_schemas(self) -> List[Dict[str, Any]]:
        """Return tool schemas this engine provides to the agent.

        Default returns empty list (no tools). LCM would return schemas
        for lcm_grep, lcm_describe, lcm_expand here.
        """
        return []

    def handle_tool_call(self, name: str, args: Dict[str, Any], **kwargs) -> str:
        """Handle a tool call from the agent.

        Only called for tool names returned by get_tool_schemas().
        Must return a JSON string.

        kwargs may include:
            messages: the current in-memory message list (for live ingestion)
        """
        import json
        return json.dumps({"error": f"Unknown context engine tool: {name}"})

    # -- Optional: status / display ----------------------------------------

    def get_status(self) -> Dict[str, Any]:
        """Return status dict for display/logging.

        Default returns the standard fields run_agent.py expects.
        """
        return {
            "last_prompt_tokens": self.last_prompt_tokens,
            "threshold_tokens": self.threshold_tokens,
            "context_length": self.context_length,
            "usage_percent": (
                min(100, self.last_prompt_tokens / self.context_length * 100)
                if self.context_length else 0
            ),
            "compression_count": self.compression_count,
        }

    # -- Optional: model switch support ------------------------------------

    def update_model(
        self,
        model: str,
        context_length: int,
        base_url: str = "",
        api_key: str = "",
        provider: str = "",
    ) -> None:
        """Called when the user switches models or on fallback activation.

        Default updates context_length and recalculates threshold_tokens
        from threshold_percent. Override if your engine needs more
        (e.g. recalculate DAG budgets, switch summary models).
        """
        self.context_length = context_length
        self.threshold_tokens = int(context_length * self.threshold_percent)
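
# Illustrative sketch (not from the diff): the smallest engine that satisfies
# the ContextEngine contract above. The truncation strategy and class name are
# assumptions made for illustration only.
#
#   from typing import Any, Dict
#
#   class TruncatingEngine(ContextEngine):
#       """Keep the protected head/tail and drop everything in between."""
#
#       name = "truncate"
#
#       def update_from_response(self, usage: Dict[str, Any]) -> None:
#           self.last_prompt_tokens = int(usage.get("prompt_tokens", 0))
#           self.last_completion_tokens = int(usage.get("completion_tokens", 0))
#           self.last_total_tokens = int(usage.get("total_tokens", 0))
#
#       def should_compress(self, prompt_tokens: int = None) -> bool:
#           tokens = prompt_tokens if prompt_tokens is not None else self.last_prompt_tokens
#           return bool(self.threshold_tokens) and tokens >= self.threshold_tokens
#
#       def compress(self, messages, current_tokens=None, focus_topic=None):
#           head = messages[: self.protect_first_n]
#           tail = messages[-self.protect_last_n:]
#           self.compression_count += 1
#           return head + tail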
@@ -13,9 +13,8 @@ from typing import Awaitable, Callable

from agent.model_metadata import estimate_tokens_rough

_QUOTED_REFERENCE_VALUE = r'(?:`[^`\n]+`|"[^"\n]+"|\'[^\'\n]+\')'
REFERENCE_PATTERN = re.compile(
    rf"(?<![\w/])@(?:(?P<simple>diff|staged)\b|(?P<kind>file|folder|git|url):(?P<value>{_QUOTED_REFERENCE_VALUE}(?::\d+(?:-\d+)?)?|\S+))"
    r"(?<![\w/])@(?:(?P<simple>diff|staged)\b|(?P<kind>file|folder|git|url):(?P<value>\S+))"
)
TRAILING_PUNCTUATION = ",.;!?"
_SENSITIVE_HOME_DIRS = (".ssh", ".aws", ".gnupg", ".kube", ".docker", ".azure", ".config/gh")
@@ -82,10 +81,14 @@ def parse_context_references(message: str) -> list[ContextReference]:
        value = _strip_trailing_punctuation(match.group("value") or "")
        line_start = None
        line_end = None
        target = _strip_reference_wrappers(value)
        target = value

        if kind == "file":
            target, line_start, line_end = _parse_file_reference_value(value)
            range_match = re.match(r"^(?P<path>.+?):(?P<start>\d+)(?:-(?P<end>\d+))?$", value)
            if range_match:
                target = range_match.group("path")
                line_start = int(range_match.group("start"))
                line_end = int(range_match.group("end") or range_match.group("start"))

        refs.append(
            ContextReference(
@@ -340,9 +343,10 @@ def _resolve_path(cwd: Path, target: str, *, allowed_root: Path | None = None) -


def _ensure_reference_path_allowed(path: Path) -> None:
    from hermes_constants import get_hermes_home
    home = Path(os.path.expanduser("~")).resolve()
    hermes_home = get_hermes_home().resolve()
    hermes_home = Path(
        os.getenv("HERMES_HOME", str(home / ".hermes"))
    ).expanduser().resolve()

    blocked_exact = {home / rel for rel in _SENSITIVE_HOME_FILES}
    blocked_exact.add(hermes_home / ".env")
@@ -372,38 +376,6 @@ def _strip_trailing_punctuation(value: str) -> str:
    return stripped


def _strip_reference_wrappers(value: str) -> str:
    if len(value) >= 2 and value[0] == value[-1] and value[0] in "`\"'":
        return value[1:-1]
    return value


def _parse_file_reference_value(value: str) -> tuple[str, int | None, int | None]:
    quoted_match = re.match(
        r'^(?P<quote>`|"|\')(?P<path>.+?)(?P=quote)(?::(?P<start>\d+)(?:-(?P<end>\d+))?)?$',
        value,
    )
    if quoted_match:
        line_start = quoted_match.group("start")
        line_end = quoted_match.group("end")
        return (
            quoted_match.group("path"),
            int(line_start) if line_start is not None else None,
            int(line_end or line_start) if line_start is not None else None,
        )

    range_match = re.match(r"^(?P<path>.+?):(?P<start>\d+)(?:-(?P<end>\d+))?$", value)
    if range_match:
        line_start = int(range_match.group("start"))
        return (
            range_match.group("path"),
            line_start,
            int(range_match.group("end") or range_match.group("start")),
        )

    return _strip_reference_wrappers(value), None, None


def _remove_reference_tokens(message: str, refs: list[ContextReference]) -> str:
    pieces: list[str] = []
    cursor = 0
@@ -483,7 +455,9 @@ def _rg_files(path: Path, cwd: Path, limit: int) -> list[Path] | None:
            text=True,
            timeout=10,
        )
    except (FileNotFoundError, OSError, subprocess.TimeoutExpired):
    except FileNotFoundError:
        return None
    except subprocess.TimeoutExpired:
        return None
    if result.returncode != 0:
        return None

@@ -11,7 +11,6 @@ from __future__ import annotations
import json
import os
import queue
import re
import shlex
import subprocess
import threading
@@ -21,15 +20,9 @@ from pathlib import Path
from types import SimpleNamespace
from typing import Any

from agent.file_safety import get_read_block_error, is_write_denied
from agent.redact import redact_sensitive_text

ACP_MARKER_BASE_URL = "acp://copilot"
_DEFAULT_TIMEOUT_SECONDS = 900.0

_TOOL_CALL_BLOCK_RE = re.compile(r"<tool_call>\s*(\{.*?\})\s*</tool_call>", re.DOTALL)
_TOOL_CALL_JSON_RE = re.compile(r"\{\s*\"id\"\s*:\s*\"[^\"]+\"\s*,\s*\"type\"\s*:\s*\"function\"\s*,\s*\"function\"\s*:\s*\{.*?\}\s*\}", re.DOTALL)


def _resolve_command() -> str:
    return (
@@ -46,47 +39,6 @@ def _resolve_args() -> list[str]:
    return shlex.split(raw)


def _resolve_home_dir() -> str:
    """Return a stable HOME for child ACP processes."""

    try:
        from hermes_constants import get_subprocess_home

        profile_home = get_subprocess_home()
        if profile_home:
            return profile_home
    except Exception:
        pass

    home = os.environ.get("HOME", "").strip()
    if home:
        return home

    expanded = os.path.expanduser("~")
    if expanded and expanded != "~":
        return expanded

    try:
        import pwd

        resolved = pwd.getpwuid(os.getuid()).pw_dir.strip()
        if resolved:
            return resolved
    except Exception:
        pass

    # Last resort: /tmp (writable on any POSIX system). Avoids crashing the
    # subprocess with no HOME; callers can set HERMES_HOME explicitly if they
    # need a different writable dir.
    return "/tmp"


def _build_subprocess_env() -> dict[str, str]:
    env = os.environ.copy()
    env["HOME"] = _resolve_home_dir()
    return env


def _jsonrpc_error(message_id: Any, code: int, message: str) -> dict[str, Any]:
    return {
        "jsonrpc": "2.0",
@@ -98,62 +50,15 @@ def _jsonrpc_error(message_id: Any, code: int, message: str) -> dict[str, Any]:
    }


def _permission_denied(message_id: Any) -> dict[str, Any]:
    return {
        "jsonrpc": "2.0",
        "id": message_id,
        "result": {
            "outcome": {
                "outcome": "cancelled",
            }
        },
    }


def _format_messages_as_prompt(
    messages: list[dict[str, Any]],
    model: str | None = None,
    tools: list[dict[str, Any]] | None = None,
    tool_choice: Any = None,
) -> str:
def _format_messages_as_prompt(messages: list[dict[str, Any]], model: str | None = None) -> str:
    sections: list[str] = [
        "You are being used as the active ACP agent backend for Hermes.",
        "Use ACP capabilities to complete tasks.",
        "IMPORTANT: If you take an action with a tool, you MUST output tool calls using <tool_call>{...}</tool_call> blocks with JSON exactly in OpenAI function-call shape.",
        "If no tool is needed, answer normally.",
        "Use your own ACP capabilities and respond directly in natural language.",
        "Do not emit OpenAI tool-call JSON.",
    ]
    if model:
        sections.append(f"Hermes requested model hint: {model}")

    if isinstance(tools, list) and tools:
        tool_specs: list[dict[str, Any]] = []
        for t in tools:
            if not isinstance(t, dict):
                continue
            fn = t.get("function") or {}
            if not isinstance(fn, dict):
                continue
            name = fn.get("name")
            if not isinstance(name, str) or not name.strip():
                continue
            tool_specs.append(
                {
                    "name": name.strip(),
                    "description": fn.get("description", ""),
                    "parameters": fn.get("parameters", {}),
                }
            )
        if tool_specs:
            sections.append(
                "Available tools (OpenAI function schema). "
                "When using a tool, emit ONLY <tool_call>{...}</tool_call> with one JSON object "
                "containing id/type/function{name,arguments}. arguments must be a JSON string.\n"
                + json.dumps(tool_specs, ensure_ascii=False)
            )

    if tool_choice is not None:
        sections.append(f"Tool choice hint: {json.dumps(tool_choice, ensure_ascii=False)}")

    transcript: list[str] = []
    for message in messages:
        if not isinstance(message, dict):
@@ -209,80 +114,6 @@ def _render_message_content(content: Any) -> str:
    return str(content).strip()


def _extract_tool_calls_from_text(text: str) -> tuple[list[SimpleNamespace], str]:
    if not isinstance(text, str) or not text.strip():
        return [], ""

    extracted: list[SimpleNamespace] = []
    consumed_spans: list[tuple[int, int]] = []

    def _try_add_tool_call(raw_json: str) -> None:
        try:
            obj = json.loads(raw_json)
        except Exception:
            return
        if not isinstance(obj, dict):
            return
        fn = obj.get("function")
        if not isinstance(fn, dict):
            return
        fn_name = fn.get("name")
        if not isinstance(fn_name, str) or not fn_name.strip():
            return
        fn_args = fn.get("arguments", "{}")
        if not isinstance(fn_args, str):
            fn_args = json.dumps(fn_args, ensure_ascii=False)
        call_id = obj.get("id")
        if not isinstance(call_id, str) or not call_id.strip():
            call_id = f"acp_call_{len(extracted)+1}"

        extracted.append(
            SimpleNamespace(
                id=call_id,
                call_id=call_id,
                response_item_id=None,
                type="function",
                function=SimpleNamespace(name=fn_name.strip(), arguments=fn_args),
            )
        )

    for m in _TOOL_CALL_BLOCK_RE.finditer(text):
        raw = m.group(1)
        _try_add_tool_call(raw)
        consumed_spans.append((m.start(), m.end()))

    # Only try bare-JSON fallback when no XML blocks were found.
    if not extracted:
        for m in _TOOL_CALL_JSON_RE.finditer(text):
            raw = m.group(0)
            _try_add_tool_call(raw)
            consumed_spans.append((m.start(), m.end()))

    if not consumed_spans:
        return extracted, text.strip()

    consumed_spans.sort()
    merged: list[tuple[int, int]] = []
    for start, end in consumed_spans:
        if not merged or start > merged[-1][1]:
            merged.append((start, end))
        else:
            merged[-1] = (merged[-1][0], max(merged[-1][1], end))

    parts: list[str] = []
    cursor = 0
    for start, end in merged:
        if cursor < start:
            parts.append(text[cursor:start])
        cursor = max(cursor, end)
    if cursor < len(text):
        parts.append(text[cursor:])

    cleaned = "\n".join(p.strip() for p in parts if p and p.strip()).strip()
    return extracted, cleaned



def _ensure_path_within_cwd(path_text: str, cwd: str) -> Path:
    candidate = Path(path_text)
    if not candidate.is_absolute():
@@ -359,39 +190,14 @@ class CopilotACPClient:
        model: str | None = None,
        messages: list[dict[str, Any]] | None = None,
        timeout: float | None = None,
        tools: list[dict[str, Any]] | None = None,
        tool_choice: Any = None,
        **_: Any,
    ) -> Any:
        prompt_text = _format_messages_as_prompt(
            messages or [],
            model=model,
            tools=tools,
            tool_choice=tool_choice,
        )
        # Normalise timeout: run_agent.py may pass an httpx.Timeout object
        # (used natively by the OpenAI SDK) rather than a plain float.
        if timeout is None:
            _effective_timeout = _DEFAULT_TIMEOUT_SECONDS
        elif isinstance(timeout, (int, float)):
            _effective_timeout = float(timeout)
        else:
            # httpx.Timeout or similar — pick the largest component so the
            # subprocess has enough wall-clock time for the full response.
            _candidates = [
                getattr(timeout, attr, None)
                for attr in ("read", "write", "connect", "pool", "timeout")
            ]
            _numeric = [float(v) for v in _candidates if isinstance(v, (int, float))]
            _effective_timeout = max(_numeric) if _numeric else _DEFAULT_TIMEOUT_SECONDS

        prompt_text = _format_messages_as_prompt(messages or [], model=model)
        response_text, reasoning_text = self._run_prompt(
            prompt_text,
            timeout_seconds=_effective_timeout,
            timeout_seconds=float(timeout or _DEFAULT_TIMEOUT_SECONDS),
        )

        tool_calls, cleaned_text = _extract_tool_calls_from_text(response_text)

        usage = SimpleNamespace(
            prompt_tokens=0,
            completion_tokens=0,
@@ -399,14 +205,13 @@ class CopilotACPClient:
            prompt_tokens_details=SimpleNamespace(cached_tokens=0),
        )
        assistant_message = SimpleNamespace(
            content=cleaned_text,
            tool_calls=tool_calls,
            content=response_text,
            tool_calls=[],
            reasoning=reasoning_text or None,
            reasoning_content=reasoning_text or None,
            reasoning_details=None,
        )
        finish_reason = "tool_calls" if tool_calls else "stop"
        choice = SimpleNamespace(message=assistant_message, finish_reason=finish_reason)
        choice = SimpleNamespace(message=assistant_message, finish_reason="stop")
        return SimpleNamespace(
            choices=[choice],
            usage=usage,
@@ -423,7 +228,6 @@ class CopilotACPClient:
                text=True,
                bufsize=1,
                cwd=self._acp_cwd,
                env=_build_subprocess_env(),
            )
        except FileNotFoundError as exc:
            raise RuntimeError(
@@ -443,8 +247,6 @@ class CopilotACPClient:
        stderr_tail: deque[str] = deque(maxlen=40)

        def _stdout_reader() -> None:
            if proc.stdout is None:
                return
            for line in proc.stdout:
                try:
                    inbox.put(json.loads(line))
@@ -477,8 +279,8 @@ class CopilotACPClient:
            proc.stdin.write(json.dumps(payload) + "\n")
            proc.stdin.flush()

            deadline = time.monotonic() + timeout_seconds
            while time.monotonic() < deadline:
            deadline = time.time() + timeout_seconds
            while time.time() < deadline:
                if proc.poll() is not None:
                    break
                try:
@@ -592,13 +394,18 @@ class CopilotACPClient:
            params = msg.get("params") or {}

            if method == "session/request_permission":
                response = _permission_denied(message_id)
                response = {
                    "jsonrpc": "2.0",
                    "id": message_id,
                    "result": {
                        "outcome": {
                            "outcome": "allow_once",
                        }
                    },
                }
            elif method == "fs/read_text_file":
                try:
                    path = _ensure_path_within_cwd(str(params.get("path") or ""), cwd)
                    block_error = get_read_block_error(str(path))
                    if block_error:
                        raise PermissionError(block_error)
                    content = path.read_text() if path.exists() else ""
                    line = params.get("line")
                    limit = params.get("limit")
@@ -607,8 +414,6 @@ class CopilotACPClient:
                    start = line - 1
                    end = start + limit if isinstance(limit, int) and limit > 0 else None
                    content = "".join(lines[start:end])
                    if content:
                        content = redact_sensitive_text(content, force=True)
                    response = {
                        "jsonrpc": "2.0",
                        "id": message_id,
@@ -621,10 +426,6 @@ class CopilotACPClient:
            elif method == "fs/write_text_file":
                try:
                    path = _ensure_path_within_cwd(str(params.get("path") or ""), cwd)
                    if is_write_denied(str(path)):
                        raise PermissionError(
                            f"Write denied: '{path}' is a protected system/credential file."
                        )
                    path.parent.mkdir(parents=True, exist_ok=True)
                    path.write_text(str(params.get("content") or ""))
                    response = {
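
# Illustrative sketch (not part of the diff): how the <tool_call> extraction in
# this file behaves on a mixed ACP reply. The sample text is invented.
#
#   sample = (
#       "I'll check the file first.\n"
#       '<tool_call>{"id": "call_1", "type": "function", '
#       '"function": {"name": "read_file", "arguments": "{\\"path\\": \\"README.md\\"}"}}</tool_call>'
#   )
#   calls, cleaned = _extract_tool_calls_from_text(sample)
#   # calls[0].function.name == "read_file"; cleaned keeps only the natural-language
#   # text, so the agent loop can surface it while executing the structured call.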
File diff suppressed because it is too large
@@ -1,418 +0,0 @@
|
||||
"""Unified removal contract for every credential source Hermes reads from.
|
||||
|
||||
Hermes seeds its credential pool from many places:
|
||||
|
||||
env:<VAR> — os.environ / ~/.hermes/.env
|
||||
claude_code — ~/.claude/.credentials.json
|
||||
hermes_pkce — ~/.hermes/.anthropic_oauth.json
|
||||
device_code — auth.json providers.<provider> (nous, openai-codex, ...)
|
||||
qwen-cli — ~/.qwen/oauth_creds.json
|
||||
gh_cli — gh auth token
|
||||
config:<name> — custom_providers config entry
|
||||
model_config — model.api_key when model.provider == "custom"
|
||||
manual — user ran `hermes auth add`
|
||||
|
||||
Each source has its own reader inside ``agent.credential_pool._seed_from_*``
|
||||
(which keep their existing shape — we haven't restructured them). What we
|
||||
unify here is **removal**:
|
||||
|
||||
``hermes auth remove <provider> <N>`` must make the pool entry stay gone.
|
||||
|
||||
Before this module, every source had an ad-hoc removal branch in
|
||||
``auth_remove_command``, and several sources had no branch at all — so
|
||||
``auth remove`` silently reverted on the next ``load_pool()`` call for
|
||||
qwen-cli, nous device_code (partial), hermes_pkce, copilot gh_cli, and
|
||||
custom-config sources.
|
||||
|
||||
Now every source registers a ``RemovalStep`` that does exactly three things
|
||||
in the same shape:
|
||||
|
||||
1. Clean up whatever externally-readable state the source reads from
|
||||
(.env line, auth.json block, OAuth file, etc.)
|
||||
2. Suppress the ``(provider, source_id)`` in auth.json so the
|
||||
corresponding ``_seed_from_*`` branch skips the upsert on re-load
|
||||
3. Return ``RemovalResult`` describing what was cleaned and any
|
||||
diagnostic hints the user should see (shell-exported env vars,
|
||||
external credential files we deliberately don't delete, etc.)
|
||||
|
||||
Adding a new credential source is:
|
||||
- wire up a reader branch in ``_seed_from_*`` (existing pattern)
|
||||
- gate that reader behind ``is_source_suppressed(provider, source_id)``
|
||||
- register a ``RemovalStep`` here
|
||||
|
||||
No more per-source if/elif chain in ``auth_remove_command``.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Callable, List, Optional
|
||||
|
||||
|
||||
@dataclass
|
||||
class RemovalResult:
|
||||
"""Outcome of removing a credential source.
|
||||
|
||||
Attributes:
|
||||
cleaned: Short strings describing external state that was actually
|
||||
mutated (``"Cleared XAI_API_KEY from .env"``,
|
||||
``"Cleared openai-codex OAuth tokens from auth store"``).
|
||||
Printed as plain lines to the user.
|
||||
hints: Diagnostic lines ABOUT state the user may need to clean up
|
||||
themselves or is deliberately left intact (shell-exported env
|
||||
var, Claude Code credential file we don't delete, etc.).
|
||||
Printed as plain lines to the user. Always non-destructive.
|
||||
suppress: Whether to call ``suppress_credential_source`` after
|
||||
cleanup so future ``load_pool`` calls skip this source.
|
||||
Default True — almost every source needs this to stay sticky.
|
||||
The only legitimate False is ``manual`` entries, which aren't
|
||||
seeded from anywhere external.
|
||||
"""
|
||||
|
||||
cleaned: List[str] = field(default_factory=list)
|
||||
hints: List[str] = field(default_factory=list)
|
||||
suppress: bool = True
|
||||
|
||||
|
||||
@dataclass
|
||||
class RemovalStep:
|
||||
"""How to remove one specific credential source cleanly.
|
||||
|
||||
Attributes:
|
||||
provider: Provider pool key (``"xai"``, ``"anthropic"``, ``"nous"``, ...).
|
||||
Special value ``"*"`` means "matches any provider" — used for
|
||||
sources like ``manual`` that aren't provider-specific.
|
||||
source_id: Source identifier as it appears in
|
||||
``PooledCredential.source``. May be a literal (``"claude_code"``)
|
||||
or a prefix pattern matched via ``match_fn``.
|
||||
match_fn: Optional predicate overriding literal ``source_id``
|
||||
matching. Gets the removed entry's source string. Used for
|
||||
``env:*`` (any env-seeded key), ``config:*`` (any custom
|
||||
pool), and ``manual:*`` (any manual-source variant).
|
||||
remove_fn: ``(provider, removed_entry) -> RemovalResult``. Does the
|
||||
actual cleanup and returns what happened for the user.
|
||||
description: One-line human-readable description for docs / tests.
|
||||
"""
|
||||
|
||||
provider: str
|
||||
source_id: str
|
||||
remove_fn: Callable[..., RemovalResult]
|
||||
match_fn: Optional[Callable[[str], bool]] = None
|
||||
description: str = ""
|
||||
|
||||
def matches(self, provider: str, source: str) -> bool:
|
||||
if self.provider != "*" and self.provider != provider:
|
||||
return False
|
||||
if self.match_fn is not None:
|
||||
return self.match_fn(source)
|
||||
return source == self.source_id
|
||||
|
||||
|
||||
_REGISTRY: List[RemovalStep] = []
|
||||
|
||||
|
||||
def register(step: RemovalStep) -> RemovalStep:
|
||||
_REGISTRY.append(step)
|
||||
return step
|
||||
|
||||
|
||||
def find_removal_step(provider: str, source: str) -> Optional[RemovalStep]:
|
||||
"""Return the first matching RemovalStep, or None if unregistered.
|
||||
|
||||
Unregistered sources fall through to the default remove path in
|
||||
``auth_remove_command``: the pool entry is already gone (that happens
|
||||
before dispatch), no external cleanup, no suppression. This is the
|
||||
correct behaviour for ``manual`` entries — they were only ever stored
|
||||
in the pool, nothing external to clean up.
|
||||
"""
|
||||
for step in _REGISTRY:
|
||||
if step.matches(provider, source):
|
||||
return step
|
||||
return None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Individual RemovalStep implementations — one per source.
|
||||
# ---------------------------------------------------------------------------
|
||||
# Each remove_fn is intentionally small and single-purpose. Adding a new
|
||||
# credential source means adding ONE entry here — no other changes to
|
||||
# auth_remove_command.
|
||||
|
||||
|
||||
def _remove_env_source(provider: str, removed) -> RemovalResult:
|
||||
"""env:<VAR> — the most common case.
|
||||
|
||||
Handles three user situations:
|
||||
1. Var lives only in ~/.hermes/.env → clear it
|
||||
2. Var lives only in the user's shell (shell profile, systemd
|
||||
EnvironmentFile, launchd plist) → hint them where to unset it
|
||||
3. Var lives in both → clear from .env, hint about shell
|
||||
"""
|
||||
from hermes_cli.config import get_env_path, remove_env_value
|
||||
|
||||
result = RemovalResult()
|
||||
env_var = removed.source[len("env:"):]
|
||||
if not env_var:
|
||||
return result
|
||||
|
||||
# Detect shell vs .env BEFORE remove_env_value pops os.environ.
|
||||
env_in_process = bool(os.getenv(env_var))
|
||||
env_in_dotenv = False
|
||||
try:
|
||||
env_path = get_env_path()
|
||||
if env_path.exists():
|
||||
env_in_dotenv = any(
|
||||
line.strip().startswith(f"{env_var}=")
|
||||
for line in env_path.read_text(errors="replace").splitlines()
|
||||
)
|
||||
except OSError:
|
||||
pass
|
||||
shell_exported = env_in_process and not env_in_dotenv
|
||||
|
||||
cleared = remove_env_value(env_var)
|
||||
if cleared:
|
||||
result.cleaned.append(f"Cleared {env_var} from .env")
|
||||
|
||||
if shell_exported:
|
||||
result.hints.extend([
|
||||
f"Note: {env_var} is still set in your shell environment "
|
||||
f"(not in ~/.hermes/.env).",
|
||||
" Unset it there (shell profile, systemd EnvironmentFile, "
|
||||
"launchd plist, etc.) or it will keep being visible to Hermes.",
|
||||
f" The pool entry is now suppressed — Hermes will ignore "
|
||||
f"{env_var} until you run `hermes auth add {provider}`.",
|
||||
])
|
||||
else:
|
||||
result.hints.append(
|
||||
f"Suppressed env:{env_var} — it will not be re-seeded even "
|
||||
f"if the variable is re-exported later."
|
||||
)
|
||||
return result
|
||||
|
||||
|
||||
def _remove_claude_code(provider: str, removed) -> RemovalResult:
|
||||
"""~/.claude/.credentials.json is owned by Claude Code itself.
|
||||
|
||||
We don't delete it — the user's Claude Code install still needs to
|
||||
work. We just suppress it so Hermes stops reading it.
|
||||
"""
|
||||
return RemovalResult(hints=[
|
||||
"Suppressed claude_code credential — it will not be re-seeded.",
|
||||
"Note: Claude Code credentials still live in ~/.claude/.credentials.json",
|
||||
"Run `hermes auth add anthropic` to re-enable if needed.",
|
||||
])
|
||||
|
||||
|
||||
def _remove_hermes_pkce(provider: str, removed) -> RemovalResult:
|
||||
"""~/.hermes/.anthropic_oauth.json is ours — delete it outright."""
|
||||
from hermes_constants import get_hermes_home
|
||||
|
||||
result = RemovalResult()
|
||||
oauth_file = get_hermes_home() / ".anthropic_oauth.json"
|
||||
if oauth_file.exists():
|
||||
try:
|
||||
oauth_file.unlink()
|
||||
result.cleaned.append("Cleared Hermes Anthropic OAuth credentials")
|
||||
except OSError as exc:
|
||||
result.hints.append(f"Could not delete {oauth_file}: {exc}")
|
||||
return result
|
||||
|
||||
|
||||
def _clear_auth_store_provider(provider: str) -> bool:
|
||||
"""Delete auth_store.providers[provider]. Returns True if deleted."""
|
||||
from hermes_cli.auth import (
|
||||
_auth_store_lock,
|
||||
_load_auth_store,
|
||||
_save_auth_store,
|
||||
)
|
||||
|
||||
with _auth_store_lock():
|
||||
auth_store = _load_auth_store()
|
||||
providers_dict = auth_store.get("providers")
|
||||
if isinstance(providers_dict, dict) and provider in providers_dict:
|
||||
del providers_dict[provider]
|
||||
_save_auth_store(auth_store)
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _remove_nous_device_code(provider: str, removed) -> RemovalResult:
|
||||
"""Nous OAuth lives in auth.json providers.nous — clear it and suppress.
|
||||
|
||||
We suppress in addition to clearing because nothing else stops the
|
||||
user's next `hermes login` run from writing providers.nous again
|
||||
before they decide to. Suppression forces them to go through
|
||||
`hermes auth add nous` to re-engage, which is the documented re-add
|
||||
path and clears the suppression atomically.
|
||||
"""
|
||||
result = RemovalResult()
|
||||
if _clear_auth_store_provider(provider):
|
||||
result.cleaned.append(f"Cleared {provider} OAuth tokens from auth store")
|
||||
return result
|
||||
|
||||
|
||||
def _remove_minimax_oauth(provider: str, removed) -> RemovalResult:
|
||||
"""MiniMax OAuth lives in auth.json providers.minimax-oauth — clear it.
|
||||
|
||||
Same pattern as Nous: single-source OAuth state with refresh tokens.
|
||||
Suppression of the `oauth` source ensures the pool reseed path
|
||||
(_seed_from_singletons) doesn't instantly undo the removal.
|
||||
"""
|
||||
result = RemovalResult()
|
||||
if _clear_auth_store_provider(provider):
|
||||
result.cleaned.append(f"Cleared {provider} OAuth tokens from auth store")
|
||||
return result
|
||||
|
||||
|
||||
def _remove_codex_device_code(provider: str, removed) -> RemovalResult:
|
||||
"""Codex tokens live in TWO places: our auth store AND ~/.codex/auth.json.
|
||||
|
||||
refresh_codex_oauth_pure() writes both every time, so clearing only
|
||||
the Hermes auth store is not enough — _seed_from_singletons() would
|
||||
re-import from ~/.codex/auth.json on the next load_pool() call and
|
||||
the removal would be instantly undone. We suppress instead of
|
||||
deleting Codex CLI's file, so the Codex CLI itself keeps working.
|
||||
|
||||
The canonical source name in ``_seed_from_singletons`` is
|
||||
``"device_code"`` (no prefix). Entries may show up in the pool as
|
||||
either ``"device_code"`` (seeded) or ``"manual:device_code"`` (added
|
||||
via ``hermes auth add openai-codex``), but in both cases the re-seed
|
||||
gate lives at the ``"device_code"`` suppression key. We suppress
|
||||
that canonical key here; the central dispatcher also suppresses
|
||||
``removed.source`` which is fine — belt-and-suspenders, idempotent.
|
||||
"""
|
||||
from hermes_cli.auth import suppress_credential_source
|
||||
|
||||
result = RemovalResult()
|
||||
if _clear_auth_store_provider(provider):
|
||||
result.cleaned.append(f"Cleared {provider} OAuth tokens from auth store")
|
||||
# Suppress the canonical re-seed source, not just whatever source the
|
||||
# removed entry had. Otherwise `manual:device_code` removals wouldn't
|
||||
# block the `device_code` re-seed path.
|
||||
suppress_credential_source(provider, "device_code")
|
||||
result.hints.extend([
|
||||
"Suppressed openai-codex device_code source — it will not be re-seeded.",
|
||||
"Note: Codex CLI credentials still live in ~/.codex/auth.json",
|
||||
"Run `hermes auth add openai-codex` to re-enable if needed.",
|
||||
])
|
||||
return result
|
||||
|
||||
|
||||
def _remove_qwen_cli(provider: str, removed) -> RemovalResult:
|
||||
"""~/.qwen/oauth_creds.json is owned by the Qwen CLI.
|
||||
|
||||
Same pattern as claude_code — suppress, don't delete. The user's
|
||||
Qwen CLI install still reads from that file.
|
||||
"""
|
||||
return RemovalResult(hints=[
|
||||
"Suppressed qwen-cli credential — it will not be re-seeded.",
|
||||
"Note: Qwen CLI credentials still live in ~/.qwen/oauth_creds.json",
|
||||
"Run `hermes auth add qwen-oauth` to re-enable if needed.",
|
||||
])
|
||||
|
||||
|
||||
def _remove_copilot_gh(provider: str, removed) -> RemovalResult:
|
||||
"""Copilot token comes from `gh auth token` or COPILOT_GITHUB_TOKEN / GH_TOKEN / GITHUB_TOKEN.
|
||||
|
||||
Copilot is special: the same token can be seeded as multiple source
|
||||
entries (gh_cli from ``_seed_from_singletons`` plus env:<VAR> from
|
||||
``_seed_from_env``), so removing one entry without suppressing the
|
||||
others lets the duplicates resurrect. We suppress ALL known copilot
|
||||
sources here so removal is stable regardless of which entry the
|
||||
user clicked.
|
||||
|
||||
We don't touch the user's gh CLI or shell state — just suppress so
|
||||
Hermes stops picking the token up.
|
||||
"""
|
||||
# Suppress ALL copilot source variants up-front so no path resurrects
|
||||
# the pool entry. The central dispatcher in auth_remove_command will
|
||||
# ALSO suppress removed.source, but it's idempotent so double-calling
|
||||
# is harmless.
|
||||
from hermes_cli.auth import suppress_credential_source
|
||||
suppress_credential_source(provider, "gh_cli")
|
||||
for env_var in ("COPILOT_GITHUB_TOKEN", "GH_TOKEN", "GITHUB_TOKEN"):
|
||||
suppress_credential_source(provider, f"env:{env_var}")
|
||||
|
||||
return RemovalResult(hints=[
|
||||
"Suppressed all copilot token sources (gh_cli + env vars) — they will not be re-seeded.",
|
||||
"Note: Your gh CLI / shell environment is unchanged.",
|
||||
"Run `hermes auth add copilot` to re-enable if needed.",
|
||||
])
|
||||
|
||||
|
||||
def _remove_custom_config(provider: str, removed) -> RemovalResult:
|
||||
"""Custom provider pools are seeded from custom_providers config or
|
||||
model.api_key. Both are in config.yaml — modifying that from here
|
||||
is more invasive than suppression. We suppress; the user can edit
|
||||
config.yaml if they want to remove the key from disk entirely.
|
||||
"""
|
||||
source_label = removed.source
|
||||
return RemovalResult(hints=[
|
||||
f"Suppressed {source_label} — it will not be re-seeded.",
|
||||
"Note: The underlying value in config.yaml is unchanged. Edit it "
|
||||
"directly if you want to remove the credential from disk.",
|
||||
])
|
||||
|
||||
|
||||
def _register_all_sources() -> None:
|
||||
"""Called once on module import.
|
||||
|
||||
ORDER MATTERS — ``find_removal_step`` returns the first match. Put
|
||||
provider-specific steps before the generic ``env:*`` step so that e.g.
|
||||
copilot's ``env:GH_TOKEN`` goes through the copilot removal (which
|
||||
doesn't touch the user's shell), not the generic env-var removal
|
||||
(which would try to clear .env).
|
||||
"""
|
||||
register(RemovalStep(
|
||||
provider="copilot", source_id="gh_cli",
|
||||
match_fn=lambda src: src == "gh_cli" or src.startswith("env:"),
|
||||
remove_fn=_remove_copilot_gh,
|
||||
description="gh auth token / COPILOT_GITHUB_TOKEN / GH_TOKEN",
|
||||
))
|
||||
register(RemovalStep(
|
||||
provider="*", source_id="env:",
|
||||
match_fn=lambda src: src.startswith("env:"),
|
||||
remove_fn=_remove_env_source,
|
||||
description="Any env-seeded credential (XAI_API_KEY, DEEPSEEK_API_KEY, etc.)",
|
||||
))
|
||||
register(RemovalStep(
|
||||
provider="anthropic", source_id="claude_code",
|
||||
remove_fn=_remove_claude_code,
|
||||
description="~/.claude/.credentials.json",
|
||||
))
|
||||
register(RemovalStep(
|
||||
provider="anthropic", source_id="hermes_pkce",
|
||||
remove_fn=_remove_hermes_pkce,
|
||||
description="~/.hermes/.anthropic_oauth.json",
|
||||
))
|
||||
register(RemovalStep(
|
||||
provider="nous", source_id="device_code",
|
||||
remove_fn=_remove_nous_device_code,
|
||||
description="auth.json providers.nous",
|
||||
))
|
||||
register(RemovalStep(
|
||||
provider="openai-codex", source_id="device_code",
|
||||
match_fn=lambda src: src == "device_code" or src.endswith(":device_code"),
|
||||
remove_fn=_remove_codex_device_code,
|
||||
description="auth.json providers.openai-codex + ~/.codex/auth.json",
|
||||
))
|
||||
register(RemovalStep(
|
||||
provider="qwen-oauth", source_id="qwen-cli",
|
||||
remove_fn=_remove_qwen_cli,
|
||||
description="~/.qwen/oauth_creds.json",
|
||||
))
|
||||
register(RemovalStep(
|
||||
provider="minimax-oauth", source_id="oauth",
|
||||
remove_fn=_remove_minimax_oauth,
|
||||
description="auth.json providers.minimax-oauth",
|
||||
))
|
||||
register(RemovalStep(
|
||||
provider="*", source_id="config:",
|
||||
match_fn=lambda src: src.startswith("config:") or src == "model_config",
|
||||
remove_fn=_remove_custom_config,
|
||||
description="Custom provider config.yaml api_key field",
|
||||
))
|
||||
|
||||
|
||||
_register_all_sources()
|
||||
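# Illustrative sketch (added; not part of the original module): one way the
# first-match dispatch described in _register_all_sources() could look. The
# registry list name (_STEPS) and the fallback-to-source_id behaviour are
# assumptions for illustration only; only register(), find_removal_step(),
# RemovalStep and its provider/source_id/match_fn/remove_fn fields appear in
# the code above.
#
# _STEPS: list[RemovalStep] = []
#
# def register(step: RemovalStep) -> None:
#     _STEPS.append(step)
#
# def find_removal_step(provider: str, source: str) -> RemovalStep | None:
#     for step in _STEPS:  # registration order doubles as priority
#         if step.provider not in ("*", provider):
#             continue
#         if step.match_fn is not None and step.match_fn(source):
#             return step
#         if step.match_fn is None and source == step.source_id:
#             return step
#     return None
#
# With the registration order above, find_removal_step("copilot", "env:GH_TOKEN")
# resolves to _remove_copilot_gh rather than the generic env-var removal.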
1674
agent/curator.py
@@ -1,693 +0,0 @@
|
||||
"""Curator snapshot + rollback.
|
||||
|
||||
A pre-run snapshot of ``~/.hermes/skills/`` (excluding ``.curator_backups/``
|
||||
itself) is taken before any mutating curator pass. Snapshots are tar.gz
|
||||
files under ``~/.hermes/skills/.curator_backups/<utc-iso>/`` with a
|
||||
companion ``manifest.json`` describing the snapshot (reason, time, size,
|
||||
counted skill files). Rollback picks a snapshot, moves the current
|
||||
``skills/`` tree aside into another snapshot so even the rollback itself
|
||||
is undoable, then extracts the chosen snapshot into place.
|
||||
|
||||
The snapshot does NOT include:
|
||||
- ``.curator_backups/`` (would recurse)
|
||||
- ``.hub/`` (hub-installed skills — managed by the hub, not us)
|
||||
|
||||
It DOES include:
|
||||
- all SKILL.md files + their directories (``scripts/``, ``references/``,
|
||||
``templates/``, ``assets/``)
|
||||
- ``.usage.json`` (usage telemetry — needed to rehydrate state cleanly)
|
||||
- ``.archive/`` (so rollback restores previously-archived skills too)
|
||||
- ``.curator_state`` (so rolling back also restores the last-run-at
|
||||
pointer — otherwise the curator would immediately re-fire on the next
|
||||
tick)
|
||||
- ``.bundled_manifest`` (so protection markers stay consistent)
|
||||
|
||||
Alongside the skills tarball, each snapshot also captures a copy of
|
||||
``~/.hermes/cron/jobs.json`` as ``cron-jobs.json`` when it exists. Cron
|
||||
jobs reference skills by name in their ``skills``/``skill`` fields; the
|
||||
curator's consolidation pass rewrites those in place via
|
||||
``cron.jobs.rewrite_skill_refs()``. Without capturing the pre-run state,
|
||||
rolling back the skills tree would leave cron jobs pointing at the
|
||||
umbrella skills even though the narrow skills they were originally
|
||||
configured with have been restored. We store the whole jobs.json for
|
||||
fidelity but rollback only touches the ``skills``/``skill`` fields — the
|
||||
rest (schedule, next_run_at, enabled, prompt, etc.) is live state and
|
||||
we leave it alone.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import tarfile
|
||||
import tempfile
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
from hermes_constants import get_hermes_home
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
DEFAULT_KEEP = 5
|
||||
|
||||
# Entries under skills/ that should NEVER be rolled up into a snapshot.
|
||||
# .hub/ is managed by the skills hub; rolling it back would break lockfile
|
||||
# invariants. .curator_backups is the backup dir itself — recursion bomb.
|
||||
_EXCLUDE_TOP_LEVEL = {".curator_backups", ".hub"}
|
||||
|
||||
# Snapshot id regex: UTC ISO with colons replaced by dashes so the filename
|
||||
# is portable (Windows-safe). An optional ``-NN`` suffix handles two
|
||||
# snapshots landing in the same wallclock second.
|
||||
_ID_RE = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}-\d{2}-\d{2}Z(-\d{2})?$")
|
||||
|
||||
|
||||
def _backups_dir() -> Path:
|
||||
return get_hermes_home() / "skills" / ".curator_backups"
|
||||
|
||||
|
||||
def _skills_dir() -> Path:
|
||||
return get_hermes_home() / "skills"
|
||||
|
||||
|
||||
def _cron_jobs_file() -> Path:
|
||||
"""Source path for the live cron jobs store (``~/.hermes/cron/jobs.json``)."""
|
||||
return get_hermes_home() / "cron" / "jobs.json"
|
||||
|
||||
|
||||
CRON_JOBS_FILENAME = "cron-jobs.json"
|
||||
|
||||
|
||||
def _backup_cron_jobs_into(dest: Path) -> Dict[str, Any]:
|
||||
"""Copy the live cron jobs.json into ``dest`` as ``cron-jobs.json``.
|
||||
|
||||
Returns a small dict describing what was captured so the caller can
|
||||
fold it into the manifest. Never raises — if the cron file is missing
|
||||
or unreadable, the return dict has ``backed_up=False`` and the reason,
|
||||
and the snapshot proceeds without cron data (the snapshot is still
|
||||
useful for rolling back skills).
|
||||
"""
|
||||
src = _cron_jobs_file()
|
||||
info: Dict[str, Any] = {"backed_up": False, "jobs_count": 0}
|
||||
if not src.exists():
|
||||
info["reason"] = "no cron/jobs.json present"
|
||||
return info
|
||||
try:
|
||||
raw = src.read_text(encoding="utf-8")
|
||||
except OSError as e:
|
||||
logger.debug("Failed to read cron/jobs.json for backup: %s", e)
|
||||
info["reason"] = f"read error: {e}"
|
||||
return info
|
||||
# Count jobs as a nice diagnostic — but don't fail the snapshot if the
|
||||
# file is unparseable; just store the raw text and let rollback deal
|
||||
# with it (or not, if it's corrupted). jobs.json wraps the list as
|
||||
# `{"jobs": [...], "updated_at": ...}` — we count via that shape, and
|
||||
# fall back to bare-list shape just in case the format ever changes.
|
||||
try:
|
||||
parsed = json.loads(raw)
|
||||
if isinstance(parsed, dict):
|
||||
inner = parsed.get("jobs")
|
||||
if isinstance(inner, list):
|
||||
info["jobs_count"] = len(inner)
|
||||
elif isinstance(parsed, list):
|
||||
info["jobs_count"] = len(parsed)
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
info["jobs_count"] = 0
|
||||
info["parse_warning"] = "jobs.json was not valid JSON at snapshot time"
|
||||
try:
|
||||
(dest / CRON_JOBS_FILENAME).write_text(raw, encoding="utf-8")
|
||||
except OSError as e:
|
||||
logger.debug("Failed to write cron backup file: %s", e)
|
||||
info["reason"] = f"write error: {e}"
|
||||
return info
|
||||
info["backed_up"] = True
|
||||
return info
|
||||
|
||||
|
||||
def _utc_id(now: Optional[datetime] = None) -> str:
|
||||
"""UTC ISO-ish filesystem-safe timestamp: ``2026-05-01T13-05-42Z``."""
|
||||
if now is None:
|
||||
now = datetime.now(timezone.utc)
|
||||
# isoformat → "2026-05-01T13:05:42.123456+00:00"; strip subseconds and tz.
|
||||
s = now.replace(microsecond=0).isoformat()
|
||||
if s.endswith("+00:00"):
|
||||
s = s[:-6]
|
||||
return s.replace(":", "-") + "Z"
|
||||
|
||||
|
||||
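# Illustrative examples (added) of the snapshot-id format produced by
# _utc_id() and accepted by _ID_RE:
#
# >>> _utc_id(datetime(2026, 5, 1, 13, 5, 42, tzinfo=timezone.utc))
# '2026-05-01T13-05-42Z'
# >>> bool(_ID_RE.match("2026-05-01T13-05-42Z"))
# True
# >>> bool(_ID_RE.match("2026-05-01T13-05-42Z-01"))  # same-second collision suffix
# True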
def _load_config() -> Dict[str, Any]:
|
||||
try:
|
||||
from hermes_cli.config import load_config
|
||||
cfg = load_config()
|
||||
except Exception as e:
|
||||
logger.debug("Failed to load config for curator backup: %s", e)
|
||||
return {}
|
||||
if not isinstance(cfg, dict):
|
||||
return {}
|
||||
cur = cfg.get("curator") or {}
|
||||
if not isinstance(cur, dict):
|
||||
return {}
|
||||
bk = cur.get("backup") or {}
|
||||
return bk if isinstance(bk, dict) else {}
|
||||
|
||||
|
||||
def is_enabled() -> bool:
|
||||
"""Default ON — the whole point of the backup is safety by default."""
|
||||
return bool(_load_config().get("enabled", True))
|
||||
|
||||
|
||||
def get_keep() -> int:
|
||||
cfg = _load_config()
|
||||
try:
|
||||
n = int(cfg.get("keep", DEFAULT_KEEP))
|
||||
except (TypeError, ValueError):
|
||||
n = DEFAULT_KEEP
|
||||
return max(1, n)
|
||||
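# Illustrative sketch (added; the YAML layout is inferred from _load_config()
# above, not quoted from the project docs): the backup settings these helpers
# read would sit under a nested curator.backup mapping in config.yaml, e.g.
#
#   curator:
#     backup:
#       enabled: true   # is_enabled() treats a missing key as True
#       keep: 5         # get_keep() clamps to >= 1; default is DEFAULT_KEEP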
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Snapshot
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _count_skill_files(base: Path) -> int:
|
||||
try:
|
||||
return sum(1 for _ in base.rglob("SKILL.md"))
|
||||
except OSError:
|
||||
return 0
|
||||
|
||||
|
||||
def _write_manifest(dest: Path, reason: str, archive_path: Path,
|
||||
skills_counted: int,
|
||||
cron_info: Optional[Dict[str, Any]] = None) -> None:
|
||||
manifest = {
|
||||
"id": dest.name,
|
||||
"reason": reason,
|
||||
"created_at": datetime.now(timezone.utc).isoformat(),
|
||||
"archive": archive_path.name,
|
||||
"archive_bytes": archive_path.stat().st_size,
|
||||
"skill_files": skills_counted,
|
||||
}
|
||||
if cron_info is not None:
|
||||
manifest["cron_jobs"] = {
|
||||
"backed_up": bool(cron_info.get("backed_up", False)),
|
||||
"jobs_count": int(cron_info.get("jobs_count", 0)),
|
||||
}
|
||||
if not cron_info.get("backed_up"):
|
||||
manifest["cron_jobs"]["reason"] = cron_info.get("reason", "not captured")
|
||||
if cron_info.get("parse_warning"):
|
||||
manifest["cron_jobs"]["parse_warning"] = cron_info["parse_warning"]
|
||||
(dest / "manifest.json").write_text(
|
||||
json.dumps(manifest, indent=2, sort_keys=True), encoding="utf-8"
|
||||
)
|
||||
|
||||
|
||||
def snapshot_skills(reason: str = "manual") -> Optional[Path]:
|
||||
"""Create a tar.gz snapshot of ``~/.hermes/skills/`` and prune old ones.
|
||||
|
||||
Returns the snapshot directory path, or ``None`` if the snapshot was
|
||||
skipped (backup disabled, skills dir missing, or an IO error occurred —
|
||||
in which case we log at debug and return None so the curator never
|
||||
aborts a pass because of a backup failure).
|
||||
"""
|
||||
if not is_enabled():
|
||||
logger.debug("Curator backup disabled by config; skipping snapshot")
|
||||
return None
|
||||
|
||||
skills = _skills_dir()
|
||||
if not skills.exists():
|
||||
logger.debug("No ~/.hermes/skills/ directory — nothing to back up")
|
||||
return None
|
||||
|
||||
backups = _backups_dir()
|
||||
try:
|
||||
backups.mkdir(parents=True, exist_ok=True)
|
||||
except OSError as e:
|
||||
logger.debug("Failed to create backups dir %s: %s", backups, e)
|
||||
return None
|
||||
|
||||
# Uniquify: if a snapshot with the same second already exists (can
|
||||
# happen if two curator runs fire in the same second), append a short
|
||||
# counter. Avoids clobbering and avoids timestamp collisions.
|
||||
base_id = _utc_id()
|
||||
snap_id = base_id
|
||||
counter = 1
|
||||
while (backups / snap_id).exists():
|
||||
snap_id = f"{base_id}-{counter:02d}"
|
||||
counter += 1
|
||||
|
||||
dest = backups / snap_id
|
||||
try:
|
||||
dest.mkdir(parents=True, exist_ok=False)
|
||||
except OSError as e:
|
||||
logger.debug("Failed to create snapshot dir %s: %s", dest, e)
|
||||
return None
|
||||
|
||||
archive = dest / "skills.tar.gz"
|
||||
try:
|
||||
# Stream into the tarball — no tempdir copy needed.
|
||||
with tarfile.open(archive, "w:gz", compresslevel=6) as tf:
|
||||
for entry in sorted(skills.iterdir()):
|
||||
if entry.name in _EXCLUDE_TOP_LEVEL:
|
||||
continue
|
||||
# arcname: store paths relative to skills/ so extraction
|
||||
# drops cleanly back into the skills dir.
|
||||
tf.add(str(entry), arcname=entry.name, recursive=True)
|
||||
# Capture cron/jobs.json alongside the tarball. Never fails the
|
||||
# snapshot — the skills side is the core guarantee; cron is
|
||||
# additive. We still record in the manifest whether it was
|
||||
# captured so rollback can surface "no cron data in this snapshot".
|
||||
cron_info = _backup_cron_jobs_into(dest)
|
||||
_write_manifest(dest, reason, archive,
|
||||
_count_skill_files(skills),
|
||||
cron_info=cron_info)
|
||||
except (OSError, tarfile.TarError) as e:
|
||||
logger.debug("Curator snapshot failed: %s", e, exc_info=True)
|
||||
# Clean up partial snapshot
|
||||
try:
|
||||
shutil.rmtree(dest, ignore_errors=True)
|
||||
except OSError:
|
||||
pass
|
||||
return None
|
||||
|
||||
_prune_old(keep=get_keep())
|
||||
logger.info("Curator snapshot created: %s (%s)", snap_id, reason)
|
||||
return dest
|
||||
|
||||
|
||||
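# Illustrative sketch (added) of what a successful snapshot_skills() call
# leaves on disk, per the module docstring; the home directory and timestamp
# shown are example values.
#
# >>> snapshot_skills(reason="pre-consolidation")
# PosixPath('/home/user/.hermes/skills/.curator_backups/2026-05-01T13-05-42Z')
#
# Inside that directory:
#   skills.tar.gz   - tarball of skills/ minus .curator_backups/ and .hub/
#   manifest.json   - id, reason, created_at, archive_bytes, skill_files, cron_jobs
#   cron-jobs.json  - copy of ~/.hermes/cron/jobs.json, when it existed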
def _prune_old(keep: int) -> List[str]:
|
||||
"""Delete regular snapshots beyond the newest *keep*. Returns deleted
|
||||
ids. Staging dirs (``.rollback-staging-*``) are implementation detail
|
||||
and pruned independently on every call."""
|
||||
backups = _backups_dir()
|
||||
if not backups.exists():
|
||||
return []
|
||||
entries: List[Tuple[str, Path]] = []
|
||||
stale_staging: List[Path] = []
|
||||
for child in backups.iterdir():
|
||||
if not child.is_dir():
|
||||
continue
|
||||
if child.name.startswith(".rollback-staging-"):
|
||||
# Staging dirs are only supposed to exist briefly during a
|
||||
# rollback. If we find one here (e.g. from a crashed rollback),
|
||||
# clean it up opportunistically.
|
||||
stale_staging.append(child)
|
||||
continue
|
||||
if _ID_RE.match(child.name):
|
||||
entries.append((child.name, child))
|
||||
# Newest first (lexicographic works because the id is UTC ISO).
|
||||
entries.sort(key=lambda t: t[0], reverse=True)
|
||||
deleted: List[str] = []
|
||||
for _, path in entries[keep:]:
|
||||
try:
|
||||
shutil.rmtree(path)
|
||||
deleted.append(path.name)
|
||||
except OSError as e:
|
||||
logger.debug("Failed to prune %s: %s", path, e)
|
||||
for path in stale_staging:
|
||||
try:
|
||||
shutil.rmtree(path)
|
||||
except OSError as e:
|
||||
logger.debug("Failed to clean stale staging dir %s: %s", path, e)
|
||||
return deleted
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# List + rollback
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _read_manifest(snap_dir: Path) -> Dict[str, Any]:
|
||||
mf = snap_dir / "manifest.json"
|
||||
if not mf.exists():
|
||||
return {}
|
||||
try:
|
||||
return json.loads(mf.read_text(encoding="utf-8"))
|
||||
except (OSError, json.JSONDecodeError):
|
||||
return {}
|
||||
|
||||
|
||||
def list_backups() -> List[Dict[str, Any]]:
|
||||
"""Return all restorable snapshots, newest first. Only entries with a
|
||||
real ``skills.tar.gz`` tarball are listed — transient
|
||||
``.rollback-staging-*`` directories created mid-rollback are
|
||||
implementation detail and not shown."""
|
||||
backups = _backups_dir()
|
||||
if not backups.exists():
|
||||
return []
|
||||
out: List[Dict[str, Any]] = []
|
||||
for child in sorted(backups.iterdir(), reverse=True):
|
||||
if not child.is_dir():
|
||||
continue
|
||||
if not _ID_RE.match(child.name):
|
||||
continue
|
||||
if not (child / "skills.tar.gz").exists():
|
||||
continue
|
||||
mf = _read_manifest(child)
|
||||
mf.setdefault("id", child.name)
|
||||
mf.setdefault("path", str(child))
|
||||
if "archive_bytes" not in mf:
|
||||
arc = child / "skills.tar.gz"
|
||||
try:
|
||||
mf["archive_bytes"] = arc.stat().st_size
|
||||
except OSError:
|
||||
mf["archive_bytes"] = 0
|
||||
out.append(mf)
|
||||
return out
|
||||
|
||||
|
||||
def _resolve_backup(backup_id: Optional[str]) -> Optional[Path]:
|
||||
"""Return the path of the requested backup, or the newest one if
|
||||
*backup_id* is None. Returns None if no match."""
|
||||
backups = _backups_dir()
|
||||
if not backups.exists():
|
||||
return None
|
||||
if backup_id:
|
||||
target = backups / backup_id
|
||||
if (
|
||||
target.is_dir()
|
||||
and _ID_RE.match(backup_id)
|
||||
and (target / "skills.tar.gz").exists()
|
||||
):
|
||||
return target
|
||||
return None
|
||||
candidates = [
|
||||
c for c in sorted(backups.iterdir(), reverse=True)
|
||||
if c.is_dir() and _ID_RE.match(c.name) and (c / "skills.tar.gz").exists()
|
||||
]
|
||||
return candidates[0] if candidates else None
|
||||
|
||||
|
||||
def _restore_cron_skill_links(snapshot_dir: Path) -> Dict[str, Any]:
|
||||
"""Reconcile backed-up cron skill links into the live ``cron/jobs.json``.
|
||||
|
||||
We do NOT overwrite the whole cron file. Only the ``skills`` and
|
||||
``skill`` fields are restored, and only on jobs that still exist in the
|
||||
current file (matched by ``id``). Everything else about the job —
|
||||
schedule, next_run_at, last_run_at, enabled, prompt, workdir, hooks —
|
||||
is live state that the user/scheduler has modified since the snapshot;
|
||||
overwriting it would regress unrelated cron activity.
|
||||
|
||||
Rules:
|
||||
- Jobs present in backup AND live, with differing skills → skills restored.
|
||||
- Jobs present in backup AND live, with matching skills → no-op.
|
||||
- Jobs present in backup but gone from live (user deleted the job
|
||||
after the snapshot) → skipped, noted in the return report.
|
||||
- Jobs present in live but not in backup (user created a new cron
|
||||
job after the snapshot) → left untouched.
|
||||
|
||||
Never raises; failures are captured in the return dict. Writes through
|
||||
``cron.jobs`` to pick up the same lock + atomic-write path that tick()
|
||||
uses, so we don't race the scheduler.
|
||||
"""
|
||||
report: Dict[str, Any] = {
|
||||
"attempted": False,
|
||||
"restored": [],
|
||||
"skipped_missing": [],
|
||||
"unchanged": 0,
|
||||
"error": None,
|
||||
}
|
||||
backup_file = snapshot_dir / CRON_JOBS_FILENAME
|
||||
if not backup_file.exists():
|
||||
report["error"] = f"snapshot has no {CRON_JOBS_FILENAME}"
|
||||
return report
|
||||
|
||||
try:
|
||||
backup_text = backup_file.read_text(encoding="utf-8")
|
||||
backup_parsed = json.loads(backup_text)
|
||||
except (OSError, json.JSONDecodeError) as e:
|
||||
report["error"] = f"failed to load backed-up jobs: {e}"
|
||||
return report
|
||||
# jobs.json on disk is `{"jobs": [...], "updated_at": ...}`; accept both
|
||||
# that shape and a bare list for forward compat.
|
||||
if isinstance(backup_parsed, dict):
|
||||
backup_jobs = backup_parsed.get("jobs")
|
||||
elif isinstance(backup_parsed, list):
|
||||
backup_jobs = backup_parsed
|
||||
else:
|
||||
backup_jobs = None
|
||||
if not isinstance(backup_jobs, list):
|
||||
report["error"] = "backed-up cron-jobs.json has no jobs list"
|
||||
return report
|
||||
|
||||
# Build a lookup of the backed-up skill state keyed by job id.
|
||||
# We only need the two skill-ish fields (legacy single and modern list).
|
||||
backup_by_id: Dict[str, Dict[str, Any]] = {}
|
||||
for job in backup_jobs:
|
||||
if not isinstance(job, dict):
|
||||
continue
|
||||
jid = job.get("id")
|
||||
if not isinstance(jid, str) or not jid:
|
||||
continue
|
||||
backup_by_id[jid] = {
|
||||
"skills": job.get("skills"),
|
||||
"skill": job.get("skill"),
|
||||
"name": job.get("name") or jid,
|
||||
}
|
||||
|
||||
if not backup_by_id:
|
||||
report["attempted"] = True # we tried but there was nothing to do
|
||||
return report
|
||||
|
||||
# Load and rewrite the live jobs under the scheduler's lock.
|
||||
try:
|
||||
from cron.jobs import load_jobs, save_jobs, _jobs_file_lock
|
||||
except ImportError as e:
|
||||
report["error"] = f"cron module unavailable: {e}"
|
||||
return report
|
||||
|
||||
report["attempted"] = True
|
||||
try:
|
||||
with _jobs_file_lock:
|
||||
live_jobs = load_jobs()
|
||||
changed = False
|
||||
|
||||
live_ids = set()
|
||||
for live in live_jobs:
|
||||
if not isinstance(live, dict):
|
||||
continue
|
||||
jid = live.get("id")
|
||||
if not isinstance(jid, str) or not jid:
|
||||
continue
|
||||
live_ids.add(jid)
|
||||
|
||||
backup = backup_by_id.get(jid)
|
||||
if backup is None:
|
||||
continue # live job didn't exist at snapshot time
|
||||
|
||||
cur_skills = live.get("skills")
|
||||
cur_skill = live.get("skill")
|
||||
bkp_skills = backup.get("skills")
|
||||
bkp_skill = backup.get("skill")
|
||||
|
||||
if cur_skills == bkp_skills and cur_skill == bkp_skill:
|
||||
report["unchanged"] += 1
|
||||
continue
|
||||
|
||||
# Restore. Preserve absence (don't force the key to appear
|
||||
# if the backup didn't have it either).
|
||||
if bkp_skills is None:
|
||||
live.pop("skills", None)
|
||||
else:
|
||||
live["skills"] = bkp_skills
|
||||
if bkp_skill is None:
|
||||
live.pop("skill", None)
|
||||
else:
|
||||
live["skill"] = bkp_skill
|
||||
|
||||
report["restored"].append({
|
||||
"job_id": jid,
|
||||
"job_name": backup.get("name") or jid,
|
||||
"from": {"skills": cur_skills, "skill": cur_skill},
|
||||
"to": {"skills": bkp_skills, "skill": bkp_skill},
|
||||
})
|
||||
changed = True
|
||||
|
||||
# Jobs in backup but not in live = user deleted them after snapshot
|
||||
for jid, backup in backup_by_id.items():
|
||||
if jid not in live_ids:
|
||||
report["skipped_missing"].append({
|
||||
"job_id": jid,
|
||||
"job_name": backup.get("name") or jid,
|
||||
})
|
||||
|
||||
if changed:
|
||||
save_jobs(live_jobs)
|
||||
except Exception as e: # noqa: BLE001 — rollback must not die mid-restore
|
||||
logger.debug("Cron skill-link restore failed: %s", e, exc_info=True)
|
||||
report["error"] = f"restore failed mid-flight: {e}"
|
||||
|
||||
return report
|
||||
|
||||
|
||||
|
||||
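# Illustrative example (added; the job data is hypothetical) of the surgical
# restore performed above: only the skills/skill fields move, everything else
# on the live job is preserved.
#
#   backed-up job: {"id": "j1", "skills": ["scrape-news"], "schedule": "0 8 * * *"}
#   live job:      {"id": "j1", "skills": ["umbrella-research"], "schedule": "0 9 * * *"}
#   after restore: {"id": "j1", "skills": ["scrape-news"], "schedule": "0 9 * * *"}
#
# The job then shows up in report["restored"] with its from/to skill values;
# a backed-up id that no longer exists in the live file lands in
# report["skipped_missing"] instead.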
def rollback(backup_id: Optional[str] = None) -> Tuple[bool, str, Optional[Path]]:
|
||||
"""Restore ``~/.hermes/skills/`` from a snapshot.
|
||||
|
||||
Strategy:
|
||||
1. Resolve the target snapshot (explicit id or newest regular).
|
||||
2. Take a safety snapshot of the CURRENT skills tree under
|
||||
``.curator_backups/pre-rollback-<ts>/`` so the rollback itself is
|
||||
undoable.
|
||||
3. Move all current top-level entries (except ``.curator_backups``
|
||||
and ``.hub``) into a tempdir.
|
||||
4. Extract the chosen snapshot into ``~/.hermes/skills/``.
|
||||
5. On failure during 4, move the tempdir contents back (best-effort)
|
||||
and return failure.
|
||||
|
||||
Returns ``(ok, message, snapshot_path)``.
|
||||
"""
|
||||
target = _resolve_backup(backup_id)
|
||||
if target is None:
|
||||
return (
|
||||
False,
|
||||
f"no matching backup found"
|
||||
+ (f" for id '{backup_id}'" if backup_id else "")
|
||||
+ " (use `hermes curator rollback --list` to see available snapshots)",
|
||||
None,
|
||||
)
|
||||
archive = target / "skills.tar.gz"
|
||||
if not archive.exists():
|
||||
return (False, f"snapshot {target.name} has no skills.tar.gz — corrupted?", None)
|
||||
|
||||
skills = _skills_dir()
|
||||
skills.mkdir(parents=True, exist_ok=True)
|
||||
backups = _backups_dir()
|
||||
backups.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Step 2: safety snapshot of current state FIRST. If this fails we bail
|
||||
# out before touching anything — otherwise a failed extract could leave
|
||||
# the user with no skills.
|
||||
try:
|
||||
snapshot_skills(reason=f"pre-rollback to {target.name}")
|
||||
except Exception as e:
|
||||
return (False, f"pre-rollback safety snapshot failed: {e}", None)
|
||||
|
||||
# Additionally move current entries into an internal staging dir so
|
||||
# the extract happens into an empty skills tree (predictable result).
|
||||
# This dir is implementation detail — not listed as a restorable
|
||||
# backup. The safety snapshot above is the user-facing undo handle.
|
||||
staged = backups / f".rollback-staging-{_utc_id()}"
|
||||
try:
|
||||
staged.mkdir(parents=True, exist_ok=False)
|
||||
except OSError as e:
|
||||
return (False, f"failed to create staging dir: {e}", None)
|
||||
|
||||
moved: List[Tuple[Path, Path]] = []
|
||||
try:
|
||||
for entry in list(skills.iterdir()):
|
||||
if entry.name in _EXCLUDE_TOP_LEVEL:
|
||||
continue
|
||||
dest = staged / entry.name
|
||||
shutil.move(str(entry), str(dest))
|
||||
moved.append((entry, dest))
|
||||
except OSError as e:
|
||||
# Best-effort rollback of the move
|
||||
for orig, dest in moved:
|
||||
try:
|
||||
shutil.move(str(dest), str(orig))
|
||||
except OSError:
|
||||
pass
|
||||
try:
|
||||
shutil.rmtree(staged, ignore_errors=True)
|
||||
except OSError:
|
||||
pass
|
||||
return (False, f"failed to stage current skills: {e}", None)
|
||||
|
||||
# Step 4: extract the snapshot into skills/
|
||||
try:
|
||||
with tarfile.open(archive, "r:gz") as tf:
|
||||
# Python 3.12+ supports filter='data' for safer extraction.
|
||||
# Fall back to the unfiltered call for older interpreters but
|
||||
# still reject absolute paths and .. components defensively.
|
||||
for member in tf.getmembers():
|
||||
name = member.name
|
||||
if name.startswith("/") or ".." in Path(name).parts:
|
||||
raise tarfile.TarError(
|
||||
f"refusing to extract unsafe path: {name!r}"
|
||||
)
|
||||
try:
|
||||
tf.extractall(str(skills), filter="data") # type: ignore[call-arg]
|
||||
except TypeError:
|
||||
# Python < 3.12 — no filter kwarg
|
||||
tf.extractall(str(skills))
|
||||
except (OSError, tarfile.TarError) as e:
|
||||
# Best-effort recover: move staged contents back
|
||||
for orig, dest in moved:
|
||||
try:
|
||||
shutil.move(str(dest), str(orig))
|
||||
except OSError:
|
||||
pass
|
||||
try:
|
||||
shutil.rmtree(staged, ignore_errors=True)
|
||||
except OSError:
|
||||
pass
|
||||
return (False, f"snapshot extract failed (state restored): {e}", None)
|
||||
|
||||
# Extract succeeded — the staging dir has served its purpose. The
|
||||
# user's undo handle is the safety snapshot tarball we took earlier.
|
||||
try:
|
||||
shutil.rmtree(staged, ignore_errors=True)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
# Reconcile cron skill-links. Surgical: only the skills/skill fields
|
||||
# on jobs matched by id. Everything else in jobs.json is live state
|
||||
# (schedule, next_run_at, enabled, prompt, etc.) and we leave it
|
||||
# alone. Failures here don't fail the overall rollback — the skills
|
||||
# tree is already restored, which is the main guarantee.
|
||||
cron_report = _restore_cron_skill_links(target)
|
||||
|
||||
summary_bits = [f"restored from snapshot {target.name}"]
|
||||
if cron_report.get("attempted"):
|
||||
restored_n = len(cron_report.get("restored") or [])
|
||||
skipped_n = len(cron_report.get("skipped_missing") or [])
|
||||
if cron_report.get("error"):
|
||||
summary_bits.append(f"cron links: error — {cron_report['error']}")
|
||||
elif restored_n == 0 and skipped_n == 0 and cron_report.get("unchanged", 0) == 0:
|
||||
# Attempted but nothing matched — empty snapshot or no overlapping ids.
|
||||
pass
|
||||
else:
|
||||
parts = []
|
||||
if restored_n:
|
||||
parts.append(f"{restored_n} job(s) had skill links restored")
|
||||
if skipped_n:
|
||||
parts.append(f"{skipped_n} backed-up job(s) no longer exist (skipped)")
|
||||
if cron_report.get("unchanged"):
|
||||
parts.append(f"{cron_report['unchanged']} already matched")
|
||||
summary_bits.append("cron links: " + ", ".join(parts))
|
||||
|
||||
logger.info("Curator rollback: restored from %s (cron_report=%s)",
|
||||
target.name, cron_report)
|
||||
return (True, "; ".join(summary_bits), target)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Human-readable summary for CLI
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def format_size(n: int) -> str:
|
||||
for unit in ("B", "KB", "MB", "GB"):
|
||||
if n < 1024 or unit == "GB":
|
||||
return f"{n:.1f} {unit}" if unit != "B" else f"{n} B"
|
||||
n /= 1024
|
||||
return f"{n:.1f} GB"
|
||||
|
||||
|
||||
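# Worked examples (added for illustration):
# >>> format_size(532)
# '532 B'
# >>> format_size(2048)
# '2.0 KB'
# >>> format_size(5 * 1024 * 1024)
# '5.0 MB'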
def summarize_backups() -> str:
|
||||
rows = list_backups()
|
||||
if not rows:
|
||||
return "No curator snapshots yet."
|
||||
lines = [f"{'id':<24} {'reason':<40} {'skills':>6} {'size':>8}"]
|
||||
lines.append("─" * len(lines[0]))
|
||||
for r in rows:
|
||||
lines.append(
|
||||
f"{r.get('id','?'):<24} "
|
||||
f"{(r.get('reason','?') or '?')[:40]:<40} "
|
||||
f"{r.get('skill_files', 0):>6} "
|
||||
f"{format_size(int(r.get('archive_bytes', 0))):>8}"
|
||||
)
|
||||
return "\n".join(lines)
|
||||
338
agent/display.py
@@ -4,6 +4,7 @@ Pure display functions and classes with no AIAgent dependency.
|
||||
Used by AIAgent._execute_tool_calls for CLI feedback.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
@@ -13,8 +14,6 @@ from dataclasses import dataclass, field
|
||||
from difflib import unified_diff
|
||||
from pathlib import Path
|
||||
|
||||
from utils import safe_json_loads
|
||||
|
||||
# ANSI escape codes for coloring tool failure indicators
|
||||
_RED = "\033[31m"
|
||||
_RESET = "\033[0m"
|
||||
@@ -22,67 +21,11 @@ _RESET = "\033[0m"
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_ANSI_RESET = "\033[0m"
|
||||
|
||||
# Diff colors — resolved lazily from the skin engine so they adapt
|
||||
# to light/dark themes. Falls back to sensible defaults on import
|
||||
# failure. We cache after first resolution for performance.
|
||||
_diff_colors_cached: dict[str, str] | None = None
|
||||
|
||||
|
||||
def _diff_ansi() -> dict[str, str]:
|
||||
"""Return ANSI escapes for diff display, resolved from the active skin."""
|
||||
global _diff_colors_cached
|
||||
if _diff_colors_cached is not None:
|
||||
return _diff_colors_cached
|
||||
|
||||
# Defaults that work on dark terminals
|
||||
dim = "\033[38;2;150;150;150m"
|
||||
file_c = "\033[38;2;180;160;255m"
|
||||
hunk = "\033[38;2;120;120;140m"
|
||||
minus = "\033[38;2;255;255;255;48;2;120;20;20m"
|
||||
plus = "\033[38;2;255;255;255;48;2;20;90;20m"
|
||||
|
||||
try:
|
||||
from hermes_cli.skin_engine import get_active_skin
|
||||
skin = get_active_skin()
|
||||
|
||||
def _hex_fg(key: str, fallback_rgb: tuple[int, int, int]) -> str:
|
||||
h = skin.get_color(key, "")
|
||||
if h and len(h) == 7 and h[0] == "#":
|
||||
r, g, b = int(h[1:3], 16), int(h[3:5], 16), int(h[5:7], 16)
|
||||
return f"\033[38;2;{r};{g};{b}m"
|
||||
r, g, b = fallback_rgb
|
||||
return f"\033[38;2;{r};{g};{b}m"
|
||||
|
||||
dim = _hex_fg("banner_dim", (150, 150, 150))
|
||||
file_c = _hex_fg("session_label", (180, 160, 255))
|
||||
hunk = _hex_fg("session_border", (120, 120, 140))
|
||||
# minus/plus use background colors — derive from ui_error/ui_ok
|
||||
err_h = skin.get_color("ui_error", "#ef5350")
|
||||
ok_h = skin.get_color("ui_ok", "#4caf50")
|
||||
if err_h and len(err_h) == 7:
|
||||
er, eg, eb = int(err_h[1:3], 16), int(err_h[3:5], 16), int(err_h[5:7], 16)
|
||||
# Use a dark tinted version as background
|
||||
minus = f"\033[38;2;255;255;255;48;2;{max(er//2,20)};{max(eg//4,10)};{max(eb//4,10)}m"
|
||||
if ok_h and len(ok_h) == 7:
|
||||
or_, og, ob = int(ok_h[1:3], 16), int(ok_h[3:5], 16), int(ok_h[5:7], 16)
|
||||
plus = f"\033[38;2;255;255;255;48;2;{max(or_//4,10)};{max(og//2,20)};{max(ob//4,10)}m"
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
_diff_colors_cached = {
|
||||
"dim": dim, "file": file_c, "hunk": hunk,
|
||||
"minus": minus, "plus": plus,
|
||||
}
|
||||
return _diff_colors_cached
|
||||
|
||||
|
||||
# Module-level helpers — each call resolves from the active skin lazily.
|
||||
def _diff_dim(): return _diff_ansi()["dim"]
|
||||
def _diff_file(): return _diff_ansi()["file"]
|
||||
def _diff_hunk(): return _diff_ansi()["hunk"]
|
||||
def _diff_minus(): return _diff_ansi()["minus"]
|
||||
def _diff_plus(): return _diff_ansi()["plus"]
|
||||
_ANSI_DIM = "\033[38;2;150;150;150m"
|
||||
_ANSI_FILE = "\033[38;2;180;160;255m"
|
||||
_ANSI_HUNK = "\033[38;2;120;120;140m"
|
||||
_ANSI_MINUS = "\033[38;2;255;255;255;48;2;120;20;20m"
|
||||
_ANSI_PLUS = "\033[38;2;255;255;255;48;2;20;90;20m"
|
||||
_MAX_INLINE_DIFF_FILES = 6
|
||||
_MAX_INLINE_DIFF_LINES = 80
|
||||
|
||||
@@ -124,6 +67,26 @@ def _get_skin():
|
||||
return None
|
||||
|
||||
|
||||
def get_skin_faces(key: str, default: list) -> list:
|
||||
"""Get spinner face list from active skin, falling back to default."""
|
||||
skin = _get_skin()
|
||||
if skin:
|
||||
faces = skin.get_spinner_list(key)
|
||||
if faces:
|
||||
return faces
|
||||
return default
|
||||
|
||||
|
||||
def get_skin_verbs() -> list:
|
||||
"""Get thinking verbs from active skin."""
|
||||
skin = _get_skin()
|
||||
if skin:
|
||||
verbs = skin.get_spinner_list("thinking_verbs")
|
||||
if verbs:
|
||||
return verbs
|
||||
return KawaiiSpinner.THINKING_VERBS
|
||||
|
||||
|
||||
def get_skin_tool_prefix() -> str:
|
||||
"""Get tool output prefix character from active skin."""
|
||||
skin = _get_skin()
|
||||
@@ -225,11 +188,9 @@ def build_tool_preview(tool_name: str, args: dict, max_len: int | None = None) -
|
||||
content = _oneline(args.get("content", ""))
|
||||
return f"+{target}: \"{content[:25]}{'...' if len(content) > 25 else ''}\""
|
||||
elif action == "replace":
|
||||
old = _oneline(args.get("old_text") or "") or "<missing old_text>"
|
||||
return f"~{target}: \"{old[:20]}\""
|
||||
return f"~{target}: \"{_oneline(args.get('old_text', '')[:20])}\""
|
||||
elif action == "remove":
|
||||
old = _oneline(args.get("old_text") or "") or "<missing old_text>"
|
||||
return f"-{target}: \"{old[:20]}\""
|
||||
return f"-{target}: \"{_oneline(args.get('old_text', '')[:20])}\""
|
||||
return action
|
||||
|
||||
if tool_name == "send_message":
|
||||
@@ -369,8 +330,9 @@ def _result_succeeded(result: str | None) -> bool:
|
||||
"""Conservatively detect whether a tool result represents success."""
|
||||
if not result:
|
||||
return False
|
||||
data = safe_json_loads(result)
|
||||
if data is None:
|
||||
try:
|
||||
data = json.loads(result)
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
return False
|
||||
if not isinstance(data, dict):
|
||||
return False
|
||||
@@ -419,7 +381,10 @@ def extract_edit_diff(
|
||||
) -> str | None:
|
||||
"""Extract a unified diff from a file-edit tool result."""
|
||||
if tool_name == "patch" and result:
|
||||
data = safe_json_loads(result)
|
||||
try:
|
||||
data = json.loads(result)
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
data = None
|
||||
if isinstance(data, dict):
|
||||
diff = data.get("diff")
|
||||
if isinstance(diff, str) and diff.strip():
|
||||
@@ -458,19 +423,19 @@ def _render_inline_unified_diff(diff: str) -> list[str]:
|
||||
if raw_line.startswith("+++ "):
|
||||
to_file = raw_line[4:].strip()
|
||||
if from_file or to_file:
|
||||
rendered.append(f"{_diff_file()}{from_file or 'a/?'} → {to_file or 'b/?'}{_ANSI_RESET}")
|
||||
rendered.append(f"{_ANSI_FILE}{from_file or 'a/?'} → {to_file or 'b/?'}{_ANSI_RESET}")
|
||||
continue
|
||||
if raw_line.startswith("@@"):
|
||||
rendered.append(f"{_diff_hunk()}{raw_line}{_ANSI_RESET}")
|
||||
rendered.append(f"{_ANSI_HUNK}{raw_line}{_ANSI_RESET}")
|
||||
continue
|
||||
if raw_line.startswith("-"):
|
||||
rendered.append(f"{_diff_minus()}{raw_line}{_ANSI_RESET}")
|
||||
rendered.append(f"{_ANSI_MINUS}{raw_line}{_ANSI_RESET}")
|
||||
continue
|
||||
if raw_line.startswith("+"):
|
||||
rendered.append(f"{_diff_plus()}{raw_line}{_ANSI_RESET}")
|
||||
rendered.append(f"{_ANSI_PLUS}{raw_line}{_ANSI_RESET}")
|
||||
continue
|
||||
if raw_line.startswith(" "):
|
||||
rendered.append(f"{_diff_dim()}{raw_line}{_ANSI_RESET}")
|
||||
rendered.append(f"{_ANSI_DIM}{raw_line}{_ANSI_RESET}")
|
||||
continue
|
||||
if raw_line:
|
||||
rendered.append(raw_line)
|
||||
@@ -536,7 +501,7 @@ def _summarize_rendered_diff_sections(
|
||||
summary = f"… omitted {omitted_lines} diff line(s)"
|
||||
if omitted_files:
|
||||
summary += f" across {omitted_files} additional file(s)/section(s)"
|
||||
rendered.append(f"{_diff_hunk()}{summary}{_ANSI_RESET}")
|
||||
rendered.append(f"{_ANSI_HUNK}{summary}{_ANSI_RESET}")
|
||||
|
||||
return rendered
|
||||
|
||||
@@ -602,45 +567,6 @@ class KawaiiSpinner:
|
||||
"analyzing", "computing", "synthesizing", "formulating", "brainstorming",
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def get_waiting_faces(cls) -> list:
|
||||
"""Return waiting faces from the active skin, falling back to KAWAII_WAITING."""
|
||||
try:
|
||||
skin = _get_skin()
|
||||
if skin:
|
||||
faces = skin.spinner.get("waiting_faces", [])
|
||||
if faces:
|
||||
return faces
|
||||
except Exception:
|
||||
pass
|
||||
return cls.KAWAII_WAITING
|
||||
|
||||
@classmethod
|
||||
def get_thinking_faces(cls) -> list:
|
||||
"""Return thinking faces from the active skin, falling back to KAWAII_THINKING."""
|
||||
try:
|
||||
skin = _get_skin()
|
||||
if skin:
|
||||
faces = skin.spinner.get("thinking_faces", [])
|
||||
if faces:
|
||||
return faces
|
||||
except Exception:
|
||||
pass
|
||||
return cls.KAWAII_THINKING
|
||||
|
||||
@classmethod
|
||||
def get_thinking_verbs(cls) -> list:
|
||||
"""Return thinking verbs from the active skin, falling back to THINKING_VERBS."""
|
||||
try:
|
||||
skin = _get_skin()
|
||||
if skin:
|
||||
verbs = skin.spinner.get("thinking_verbs", [])
|
||||
if verbs:
|
||||
return verbs
|
||||
except Exception:
|
||||
pass
|
||||
return cls.THINKING_VERBS
|
||||
|
||||
def __init__(self, message: str = "", spinner_type: str = 'dots', print_fn=None):
|
||||
self.message = message
|
||||
self.spinner_frames = self.SPINNERS.get(spinner_type, self.SPINNERS['dots'])
|
||||
@@ -797,6 +723,46 @@ class KawaiiSpinner:
|
||||
return False
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Kawaii face arrays (used by AIAgent._execute_tool_calls for spinner text)
|
||||
# =========================================================================
|
||||
|
||||
KAWAII_SEARCH = [
|
||||
"♪(´ε` )", "(。◕‿◕。)", "ヾ(^∇^)", "(◕ᴗ◕✿)", "( ˘▽˘)っ",
|
||||
"٩(◕‿◕。)۶", "(✿◠‿◠)", "♪~(´ε` )", "(ノ´ヮ`)ノ*:・゚✧", "\(◎o◎)/",
|
||||
]
|
||||
KAWAII_READ = [
|
||||
"φ(゜▽゜*)♪", "( ˘▽˘)っ", "(⌐■_■)", "٩(。•́‿•̀。)۶", "(◕‿◕✿)",
|
||||
"ヾ(@⌒ー⌒@)ノ", "(✧ω✧)", "♪(๑ᴖ◡ᴖ๑)♪", "(≧◡≦)", "( ´ ▽ ` )ノ",
|
||||
]
|
||||
KAWAII_TERMINAL = [
|
||||
"ヽ(>∀<☆)ノ", "(ノ°∀°)ノ", "٩(^ᴗ^)۶", "ヾ(⌐■_■)ノ♪", "(•̀ᴗ•́)و",
|
||||
"┗(^0^)┓", "(`・ω・´)", "\( ̄▽ ̄)/", "(ง •̀_•́)ง", "ヽ(´▽`)/",
|
||||
]
|
||||
KAWAII_BROWSER = [
|
||||
"(ノ°∀°)ノ", "(☞゚ヮ゚)☞", "( ͡° ͜ʖ ͡°)", "┌( ಠ_ಠ)┘", "(⊙_⊙)?",
|
||||
"ヾ(•ω•`)o", "( ̄ω ̄)", "( ˇωˇ )", "(ᵔᴥᵔ)", "\(◎o◎)/",
|
||||
]
|
||||
KAWAII_CREATE = [
|
||||
"✧*。٩(ˊᗜˋ*)و✧", "(ノ◕ヮ◕)ノ*:・゚✧", "ヽ(>∀<☆)ノ", "٩(♡ε♡)۶", "(◕‿◕)♡",
|
||||
"✿◕ ‿ ◕✿", "(*≧▽≦)", "ヾ(^-^)ノ", "(☆▽☆)", "°˖✧◝(⁰▿⁰)◜✧˖°",
|
||||
]
|
||||
KAWAII_SKILL = [
|
||||
"ヾ(@⌒ー⌒@)ノ", "(๑˃ᴗ˂)ﻭ", "٩(◕‿◕。)۶", "(✿╹◡╹)", "ヽ(・∀・)ノ",
|
||||
"(ノ´ヮ`)ノ*:・゚✧", "♪(๑ᴖ◡ᴖ๑)♪", "(◠‿◠)", "٩(ˊᗜˋ*)و", "(^▽^)",
|
||||
"ヾ(^∇^)", "(★ω★)/", "٩(。•́‿•̀。)۶", "(◕ᴗ◕✿)", "\(◎o◎)/",
|
||||
"(✧ω✧)", "ヽ(>∀<☆)ノ", "( ˘▽˘)っ", "(≧◡≦) ♡", "ヾ( ̄▽ ̄)",
|
||||
]
|
||||
KAWAII_THINK = [
|
||||
"(っ°Д°;)っ", "(;′⌒`)", "(・_・ヾ", "( ´_ゝ`)", "( ̄ヘ ̄)",
|
||||
"(。-`ω´-)", "( ˘︹˘ )", "(¬_¬)", "ヽ(ー_ー )ノ", "(;一_一)",
|
||||
]
|
||||
KAWAII_GENERIC = [
|
||||
"♪(´ε` )", "(◕‿◕✿)", "ヾ(^∇^)", "٩(◕‿◕。)۶", "(✿◠‿◠)",
|
||||
"(ノ´ヮ`)ノ*:・゚✧", "ヽ(>∀<☆)ノ", "(☆▽☆)", "( ˘▽˘)っ", "(≧◡≦)",
|
||||
]
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Cute tool message (completion line that replaces the spinner)
|
||||
# =========================================================================
|
||||
@@ -812,19 +778,23 @@ def _detect_tool_failure(tool_name: str, result: str | None) -> tuple[bool, str]
|
||||
return False, ""
|
||||
|
||||
if tool_name == "terminal":
|
||||
data = safe_json_loads(result)
|
||||
if isinstance(data, dict):
|
||||
try:
|
||||
data = json.loads(result)
|
||||
exit_code = data.get("exit_code")
|
||||
if exit_code is not None and exit_code != 0:
|
||||
return True, f" [exit {exit_code}]"
|
||||
except (json.JSONDecodeError, TypeError, AttributeError):
|
||||
logger.debug("Could not parse terminal result as JSON for exit code check")
|
||||
return False, ""
|
||||
|
||||
# Memory-specific: distinguish "full" from real errors
|
||||
if tool_name == "memory":
|
||||
data = safe_json_loads(result)
|
||||
if isinstance(data, dict):
|
||||
try:
|
||||
data = json.loads(result)
|
||||
if data.get("success") is False and "exceed the limit" in data.get("error", ""):
|
||||
return True, " [full]"
|
||||
except (json.JSONDecodeError, TypeError, AttributeError):
|
||||
logger.debug("Could not parse memory result as JSON for capacity check")
|
||||
|
||||
# Generic heuristic for non-terminal tools
|
||||
lower = result[:500].lower()
|
||||
@@ -852,15 +822,13 @@ def get_cute_tool_message(
|
||||
s = str(s)
|
||||
if _tool_preview_max_len == 0:
|
||||
return s # no limit
|
||||
limit = _tool_preview_max_len
|
||||
return (s[:limit-3] + "...") if len(s) > limit else s
|
||||
return (s[:n-3] + "...") if len(s) > n else s
|
||||
|
||||
def _path(p, n=35):
|
||||
p = str(p)
|
||||
if _tool_preview_max_len == 0:
|
||||
return p # no limit
|
||||
limit = _tool_preview_max_len
|
||||
return ("..." + p[-(limit-3):]) if len(p) > limit else p
|
||||
return ("..." + p[-(n-3):]) if len(p) > n else p
|
||||
|
||||
def _wrap(line: str) -> str:
|
||||
"""Apply skin tool prefix and failure suffix."""
|
||||
@@ -922,6 +890,8 @@ def get_cute_tool_message(
|
||||
return _wrap(f"┊ ◀️ back {dur}")
|
||||
if tool_name == "browser_press":
|
||||
return _wrap(f"┊ ⌨️ press {args.get('key', '?')} {dur}")
|
||||
if tool_name == "browser_close":
|
||||
return _wrap(f"┊ 🚪 close browser {dur}")
|
||||
if tool_name == "browser_get_images":
|
||||
return _wrap(f"┊ 🖼️ images extracting {dur}")
|
||||
if tool_name == "browser_vision":
|
||||
@@ -943,13 +913,9 @@ def get_cute_tool_message(
|
||||
if action == "add":
|
||||
return _wrap(f"┊ 🧠 memory +{target}: \"{_trunc(args.get('content', ''), 30)}\" {dur}")
|
||||
elif action == "replace":
|
||||
old = args.get("old_text") or ""
|
||||
old = old if old else "<missing old_text>"
|
||||
return _wrap(f"┊ 🧠 memory ~{target}: \"{_trunc(old, 20)}\" {dur}")
|
||||
return _wrap(f"┊ 🧠 memory ~{target}: \"{_trunc(args.get('old_text', ''), 20)}\" {dur}")
|
||||
elif action == "remove":
|
||||
old = args.get("old_text") or ""
|
||||
old = old if old else "<missing old_text>"
|
||||
return _wrap(f"┊ 🧠 memory -{target}: \"{_trunc(old, 20)}\" {dur}")
|
||||
return _wrap(f"┊ 🧠 memory -{target}: \"{_trunc(args.get('old_text', ''), 20)}\" {dur}")
|
||||
return _wrap(f"┊ 🧠 memory {action} {dur}")
|
||||
if tool_name == "skills_list":
|
||||
return _wrap(f"┊ 📚 skills list {args.get('category', 'all')} {dur}")
|
||||
@@ -1001,4 +967,118 @@ def get_cute_tool_message(
|
||||
# Honcho session line (one-liner with clickable OSC 8 hyperlink)
|
||||
# =========================================================================
|
||||
|
||||
_DIM = "\033[2m"
|
||||
_SKY_BLUE = "\033[38;5;117m"
|
||||
_ANSI_RESET = "\033[0m"
|
||||
|
||||
|
||||
def honcho_session_url(workspace: str, session_name: str) -> str:
|
||||
"""Build a Honcho app URL for a session."""
|
||||
from urllib.parse import quote
|
||||
return (
|
||||
f"https://app.honcho.dev/explore"
|
||||
f"?workspace={quote(workspace, safe='')}"
|
||||
f"&view=sessions"
|
||||
f"&session={quote(session_name, safe='')}"
|
||||
)
|
||||
|
||||
|
||||
def _osc8_link(url: str, text: str) -> str:
|
||||
"""OSC 8 terminal hyperlink (clickable in iTerm2, Ghostty, WezTerm, etc.)."""
|
||||
return f"\033]8;;{url}\033\\{text}\033]8;;\033\\"
|
||||
|
||||
|
||||
def honcho_session_line(workspace: str, session_name: str) -> str:
|
||||
"""One-line session indicator: `Honcho session: <clickable name>`."""
|
||||
url = honcho_session_url(workspace, session_name)
|
||||
linked_name = _osc8_link(url, f"{_SKY_BLUE}{session_name}{_ANSI_RESET}")
|
||||
return f"{_DIM}Honcho session:{_ANSI_RESET} {linked_name}"
|
||||
|
||||
|
||||
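# Illustrative example (added; the URL and label are placeholders) of the
# OSC 8 escape sequence _osc8_link() emits:
# >>> _osc8_link("https://app.honcho.dev/explore?workspace=w", "my-session")
# '\x1b]8;;https://app.honcho.dev/explore?workspace=w\x1b\\my-session\x1b]8;;\x1b\\'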
def write_tty(text: str) -> None:
|
||||
"""Write directly to /dev/tty, bypassing stdout capture."""
|
||||
try:
|
||||
fd = os.open("/dev/tty", os.O_WRONLY)
|
||||
os.write(fd, text.encode("utf-8"))
|
||||
os.close(fd)
|
||||
except OSError:
|
||||
sys.stdout.write(text)
|
||||
sys.stdout.flush()
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Context pressure display (CLI user-facing warnings)
|
||||
# =========================================================================
|
||||
|
||||
# ANSI color codes for context pressure tiers
|
||||
_CYAN = "\033[36m"
|
||||
_YELLOW = "\033[33m"
|
||||
_BOLD = "\033[1m"
|
||||
_DIM_ANSI = "\033[2m"
|
||||
|
||||
# Bar characters
|
||||
_BAR_FILLED = "▰"
|
||||
_BAR_EMPTY = "▱"
|
||||
_BAR_WIDTH = 20
|
||||
|
||||
|
||||
def format_context_pressure(
|
||||
compaction_progress: float,
|
||||
threshold_tokens: int,
|
||||
threshold_percent: float,
|
||||
compression_enabled: bool = True,
|
||||
) -> str:
|
||||
"""Build a formatted context pressure line for CLI display.
|
||||
|
||||
The bar and percentage show progress toward the compaction threshold,
|
||||
NOT the raw context window. 100% = compaction fires.
|
||||
|
||||
Args:
|
||||
compaction_progress: How close to compaction (0.0–1.0, 1.0 = fires).
|
||||
threshold_tokens: Compaction threshold in tokens.
|
||||
threshold_percent: Compaction threshold as a fraction of context window.
|
||||
compression_enabled: Whether auto-compression is active.
|
||||
"""
|
||||
pct_int = min(int(compaction_progress * 100), 100)
|
||||
filled = min(int(compaction_progress * _BAR_WIDTH), _BAR_WIDTH)
|
||||
bar = _BAR_FILLED * filled + _BAR_EMPTY * (_BAR_WIDTH - filled)
|
||||
|
||||
threshold_k = f"{threshold_tokens // 1000}k" if threshold_tokens >= 1000 else str(threshold_tokens)
|
||||
threshold_pct_int = int(threshold_percent * 100)
|
||||
|
||||
color = f"{_BOLD}{_YELLOW}"
|
||||
icon = "⚠"
|
||||
if compression_enabled:
|
||||
hint = "compaction approaching"
|
||||
else:
|
||||
hint = "no auto-compaction"
|
||||
|
||||
return (
|
||||
f" {color}{icon} context {bar} {pct_int}% to compaction{_ANSI_RESET}"
|
||||
f" {_DIM_ANSI}{threshold_k} threshold ({threshold_pct_int}%) · {hint}{_ANSI_RESET}"
|
||||
)
|
||||
|
||||
|
||||
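# Worked example (added) of the bar arithmetic shared by both context-pressure
# formatters: with compaction_progress=0.45 and _BAR_WIDTH=20, pct_int is 45
# and filled is int(0.45 * 20) == 9, so the bar renders as
# "▰▰▰▰▰▰▰▰▰▱▱▱▱▱▱▱▱▱▱▱" followed by "45% to compaction".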
def format_context_pressure_gateway(
|
||||
compaction_progress: float,
|
||||
threshold_percent: float,
|
||||
compression_enabled: bool = True,
|
||||
) -> str:
|
||||
"""Build a plain-text context pressure notification for messaging platforms.
|
||||
|
||||
No ANSI — just Unicode and plain text suitable for Telegram/Discord/etc.
|
||||
The percentage shows progress toward the compaction threshold.
|
||||
"""
|
||||
pct_int = min(int(compaction_progress * 100), 100)
|
||||
filled = min(int(compaction_progress * _BAR_WIDTH), _BAR_WIDTH)
|
||||
bar = _BAR_FILLED * filled + _BAR_EMPTY * (_BAR_WIDTH - filled)
|
||||
|
||||
threshold_pct_int = int(threshold_percent * 100)
|
||||
|
||||
icon = "⚠️"
|
||||
if compression_enabled:
|
||||
hint = f"Context compaction approaching (threshold: {threshold_pct_int}% of window)."
|
||||
else:
|
||||
hint = "Auto-compaction is disabled — context may be truncated."
|
||||
|
||||
return f"{icon} Context: {bar} {pct_int}% to compaction\n{hint}"
|
||||
|
||||
@@ -1,111 +0,0 @@
"""Shared file safety rules used by both tools and ACP shims."""

from __future__ import annotations

import os
from pathlib import Path
from typing import Optional


def _hermes_home_path() -> Path:
    """Resolve the active HERMES_HOME (profile-aware) without circular imports."""
    try:
        from hermes_constants import get_hermes_home  # local import to avoid cycles
        return get_hermes_home()
    except Exception:
        return Path(os.path.expanduser("~/.hermes"))


def build_write_denied_paths(home: str) -> set[str]:
    """Return exact sensitive paths that must never be written."""
    hermes_home = _hermes_home_path()
    return {
        os.path.realpath(p)
        for p in [
            os.path.join(home, ".ssh", "authorized_keys"),
            os.path.join(home, ".ssh", "id_rsa"),
            os.path.join(home, ".ssh", "id_ed25519"),
            os.path.join(home, ".ssh", "config"),
            str(hermes_home / ".env"),
            os.path.join(home, ".bashrc"),
            os.path.join(home, ".zshrc"),
            os.path.join(home, ".profile"),
            os.path.join(home, ".bash_profile"),
            os.path.join(home, ".zprofile"),
            os.path.join(home, ".netrc"),
            os.path.join(home, ".pgpass"),
            os.path.join(home, ".npmrc"),
            os.path.join(home, ".pypirc"),
            "/etc/sudoers",
            "/etc/passwd",
            "/etc/shadow",
        ]
    }


def build_write_denied_prefixes(home: str) -> list[str]:
    """Return sensitive directory prefixes that must never be written."""
    return [
        os.path.realpath(p) + os.sep
        for p in [
            os.path.join(home, ".ssh"),
            os.path.join(home, ".aws"),
            os.path.join(home, ".gnupg"),
            os.path.join(home, ".kube"),
            "/etc/sudoers.d",
            "/etc/systemd",
            os.path.join(home, ".docker"),
            os.path.join(home, ".azure"),
            os.path.join(home, ".config", "gh"),
        ]
    ]


def get_safe_write_root() -> Optional[str]:
    """Return the resolved HERMES_WRITE_SAFE_ROOT path, or None if unset."""
    root = os.getenv("HERMES_WRITE_SAFE_ROOT", "")
    if not root:
        return None
    try:
        return os.path.realpath(os.path.expanduser(root))
    except Exception:
        return None


def is_write_denied(path: str) -> bool:
    """Return True if path is blocked by the write denylist or safe root."""
    home = os.path.realpath(os.path.expanduser("~"))
    resolved = os.path.realpath(os.path.expanduser(str(path)))

    if resolved in build_write_denied_paths(home):
        return True
    for prefix in build_write_denied_prefixes(home):
        if resolved.startswith(prefix):
            return True

    safe_root = get_safe_write_root()
    if safe_root and not (resolved == safe_root or resolved.startswith(safe_root + os.sep)):
        return True

    return False

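# Illustrative examples (added; the paths are examples) of the deny rules
# implemented by is_write_denied():
#
# >>> is_write_denied("~/.ssh/id_rsa")       # exact denylisted file
# True
# >>> is_write_denied("~/.aws/credentials")  # under a denied prefix
# True
# >>> is_write_denied("/tmp/scratch.txt")    # allowed, unless HERMES_WRITE_SAFE_ROOT
# False                                      # is set and excludes it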
def get_read_block_error(path: str) -> Optional[str]:
    """Return an error message when a read targets internal Hermes cache files."""
    resolved = Path(path).expanduser().resolve()
    hermes_home = _hermes_home_path().resolve()
    blocked_dirs = [
        hermes_home / "skills" / ".hub" / "index-cache",
        hermes_home / "skills" / ".hub",
    ]
    for blocked in blocked_dirs:
        try:
            resolved.relative_to(blocked)
        except ValueError:
            continue
        return (
            f"Access denied: {path} is an internal Hermes cache file "
            "and cannot be read directly to prevent prompt injection. "
            "Use the skills_list or skill_view tools instead."
        )
    return None
@@ -1,903 +0,0 @@
|
||||
"""OpenAI-compatible facade that talks to Google's Cloud Code Assist backend.
|
||||
|
||||
This adapter lets Hermes use the ``google-gemini-cli`` provider as if it were
|
||||
a standard OpenAI-shaped chat completion endpoint, while the underlying HTTP
|
||||
traffic goes to ``cloudcode-pa.googleapis.com/v1internal:{generateContent,
|
||||
streamGenerateContent}`` with a Bearer access token obtained via OAuth PKCE.
|
||||
|
||||
Architecture
|
||||
------------
|
||||
- ``GeminiCloudCodeClient`` exposes ``.chat.completions.create(**kwargs)``
|
||||
mirroring the subset of the OpenAI SDK that ``run_agent.py`` uses.
|
||||
- Incoming OpenAI ``messages[]`` / ``tools[]`` / ``tool_choice`` are translated
|
||||
to Gemini's native ``contents[]`` / ``tools[].functionDeclarations`` /
|
||||
``toolConfig`` / ``systemInstruction`` shape.
|
||||
- The request body is wrapped ``{project, model, user_prompt_id, request}``
|
||||
per Code Assist API expectations.
|
||||
- Responses (``candidates[].content.parts[]``) are converted back to
|
||||
OpenAI ``choices[0].message`` shape with ``content`` + ``tool_calls``.
|
||||
- Streaming uses SSE (``?alt=sse``) and yields OpenAI-shaped delta chunks.
|
||||
|
||||
Attribution
|
||||
-----------
|
||||
Translation semantics follow jenslys/opencode-gemini-auth (MIT) and the public
|
||||
Gemini API docs. Request envelope shape
|
||||
(``{project, model, user_prompt_id, request}``) is documented nowhere; it is
|
||||
reverse-engineered from the opencode-gemini-auth and clawdbot implementations.
|
||||
"""
|
||||
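# Illustrative sketch (added; the shape is taken from the docstring above,
# the field values are placeholders) of the wrapped Code Assist request body
# this adapter sends to v1internal:generateContent / streamGenerateContent:
#
# {
#     "project": "<gcp-project-id>",
#     "model": "<gemini-model-name>",
#     "user_prompt_id": "<uuid4>",
#     "request": {
#         "contents": [...],                         # from _build_gemini_contents()
#         "systemInstruction": {...},                # optional, from system messages
#         "tools": [{"functionDeclarations": [...]}],
#         "toolConfig": {...},                       # derived from tool_choice
#     },
# }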
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
import uuid
|
||||
from types import SimpleNamespace
|
||||
from typing import Any, Dict, Iterator, List, Optional
|
||||
|
||||
import httpx
|
||||
|
||||
from agent import google_oauth
|
||||
from agent.gemini_schema import sanitize_gemini_tool_parameters
|
||||
from agent.google_code_assist import (
|
||||
CODE_ASSIST_ENDPOINT,
|
||||
CodeAssistError,
|
||||
ProjectContext,
|
||||
resolve_project_context,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Request translation: OpenAI → Gemini
|
||||
# =============================================================================
|
||||
|
||||
_ROLE_MAP_OPENAI_TO_GEMINI = {
|
||||
"user": "user",
|
||||
"assistant": "model",
|
||||
"system": "user", # handled separately via systemInstruction
|
||||
"tool": "user", # functionResponse is wrapped in a user-role turn
|
||||
"function": "user",
|
||||
}
|
||||
|
||||
|
||||
def _coerce_content_to_text(content: Any) -> str:
|
||||
"""OpenAI content may be str or a list of parts; reduce to plain text."""
|
||||
if content is None:
|
||||
return ""
|
||||
if isinstance(content, str):
|
||||
return content
|
||||
if isinstance(content, list):
|
||||
pieces: List[str] = []
|
||||
for p in content:
|
||||
if isinstance(p, str):
|
||||
pieces.append(p)
|
||||
elif isinstance(p, dict):
|
||||
if p.get("type") == "text" and isinstance(p.get("text"), str):
|
||||
pieces.append(p["text"])
|
||||
# Multimodal (image_url, etc.) — stub for now; log and skip
|
||||
elif p.get("type") in ("image_url", "input_audio"):
|
||||
logger.debug("Dropping multimodal part (not yet supported): %s", p.get("type"))
|
||||
return "\n".join(pieces)
|
||||
return str(content)
|
||||
|
||||
|
||||
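# Illustrative example (added) of the reduction performed by
# _coerce_content_to_text(); the part contents are placeholders:
# >>> _coerce_content_to_text([
# ...     {"type": "text", "text": "hello"},
# ...     {"type": "image_url", "image_url": {"url": "https://example.com/x.png"}},
# ...     {"type": "text", "text": "world"},
# ... ])
# 'hello\nworld'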
def _translate_tool_call_to_gemini(tool_call: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""OpenAI tool_call -> Gemini functionCall part."""
|
||||
fn = tool_call.get("function") or {}
|
||||
args_raw = fn.get("arguments", "")
|
||||
try:
|
||||
args = json.loads(args_raw) if isinstance(args_raw, str) and args_raw else {}
|
||||
except json.JSONDecodeError:
|
||||
args = {"_raw": args_raw}
|
||||
if not isinstance(args, dict):
|
||||
args = {"_value": args}
|
||||
return {
|
||||
"functionCall": {
|
||||
"name": fn.get("name") or "",
|
||||
"args": args,
|
||||
},
|
||||
# Sentinel signature — matches opencode-gemini-auth's approach.
|
||||
# Without this, Code Assist rejects function calls that originated
|
||||
# outside its own chain.
|
||||
"thoughtSignature": "skip_thought_signature_validator",
|
||||
}
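# Illustrative sketch (not part of the original flow; the tool name and
# arguments below are hypothetical). Given an OpenAI-shaped tool call:
_EXAMPLE_OPENAI_TOOL_CALL = {
    "id": "call_abc123",
    "type": "function",
    "function": {"name": "read_file", "arguments": '{"path": "README.md"}'},
}
# _translate_tool_call_to_gemini(_EXAMPLE_OPENAI_TOOL_CALL) would yield:
# {
#     "functionCall": {"name": "read_file", "args": {"path": "README.md"}},
#     "thoughtSignature": "skip_thought_signature_validator",
# }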
|
||||
|
||||
|
||||
def _translate_tool_result_to_gemini(message: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""OpenAI tool-role message -> Gemini functionResponse part.
|
||||
|
||||
The function name is not carried on the OpenAI tool message itself; it is
established by the assistant message that issued the call. For simplicity we
read ``name`` from the message when present (the OpenAI SDK copies it there)
and otherwise fall back to the ``tool_call_id``.
|
||||
"""
|
||||
name = str(message.get("name") or message.get("tool_call_id") or "tool")
|
||||
content = _coerce_content_to_text(message.get("content"))
|
||||
# Gemini expects the response as a dict under `response`. We wrap plain
|
||||
# text in {"output": "..."}.
|
||||
try:
|
||||
parsed = json.loads(content) if content.strip().startswith(("{", "[")) else None
|
||||
except json.JSONDecodeError:
|
||||
parsed = None
|
||||
response = parsed if isinstance(parsed, dict) else {"output": content}
|
||||
return {
|
||||
"functionResponse": {
|
||||
"name": name,
|
||||
"response": response,
|
||||
},
|
||||
}
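# Illustrative sketch (hypothetical tool name and output). A plain-text tool
# result is wrapped under {"output": ...}; a JSON-object result is passed
# through directly as the response dict:
_EXAMPLE_OPENAI_TOOL_MESSAGE = {
    "role": "tool",
    "tool_call_id": "call_abc123",
    "name": "read_file",
    "content": "line 1\nline 2",
}
# _translate_tool_result_to_gemini(_EXAMPLE_OPENAI_TOOL_MESSAGE) would yield:
# {"functionResponse": {"name": "read_file",
#                       "response": {"output": "line 1\nline 2"}}}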
|
||||
|
||||
|
||||
def _build_gemini_contents(
|
||||
messages: List[Dict[str, Any]],
|
||||
) -> tuple[List[Dict[str, Any]], Optional[Dict[str, Any]]]:
|
||||
"""Convert OpenAI messages[] to Gemini contents[] + systemInstruction."""
|
||||
system_text_parts: List[str] = []
|
||||
contents: List[Dict[str, Any]] = []
|
||||
|
||||
for msg in messages:
|
||||
if not isinstance(msg, dict):
|
||||
continue
|
||||
role = str(msg.get("role") or "user")
|
||||
|
||||
if role == "system":
|
||||
system_text_parts.append(_coerce_content_to_text(msg.get("content")))
|
||||
continue
|
||||
|
||||
# Tool result message — emit a user-role turn with functionResponse
|
||||
if role == "tool" or role == "function":
|
||||
contents.append({
|
||||
"role": "user",
|
||||
"parts": [_translate_tool_result_to_gemini(msg)],
|
||||
})
|
||||
continue
|
||||
|
||||
gemini_role = _ROLE_MAP_OPENAI_TO_GEMINI.get(role, "user")
|
||||
parts: List[Dict[str, Any]] = []
|
||||
|
||||
text = _coerce_content_to_text(msg.get("content"))
|
||||
if text:
|
||||
parts.append({"text": text})
|
||||
|
||||
# Assistant messages can carry tool_calls
|
||||
tool_calls = msg.get("tool_calls") or []
|
||||
if isinstance(tool_calls, list):
|
||||
for tc in tool_calls:
|
||||
if isinstance(tc, dict):
|
||||
parts.append(_translate_tool_call_to_gemini(tc))
|
||||
|
||||
if not parts:
|
||||
# Gemini rejects empty parts; skip the turn entirely
|
||||
continue
|
||||
|
||||
contents.append({"role": gemini_role, "parts": parts})
|
||||
|
||||
system_instruction: Optional[Dict[str, Any]] = None
|
||||
joined_system = "\n".join(p for p in system_text_parts if p).strip()
|
||||
if joined_system:
|
||||
system_instruction = {
|
||||
"role": "system",
|
||||
"parts": [{"text": joined_system}],
|
||||
}
|
||||
|
||||
return contents, system_instruction
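# Illustrative sketch (hypothetical conversation) of the messages[] ->
# contents[] + systemInstruction split performed above:
_EXAMPLE_OPENAI_MESSAGES = [
    {"role": "system", "content": "Be terse."},
    {"role": "user", "content": "What is 2 + 2?"},
    {"role": "assistant", "content": "4"},
]
# _build_gemini_contents(_EXAMPLE_OPENAI_MESSAGES) would return:
# contents = [
#     {"role": "user", "parts": [{"text": "What is 2 + 2?"}]},
#     {"role": "model", "parts": [{"text": "4"}]},
# ]
# system_instruction = {"role": "system", "parts": [{"text": "Be terse."}]}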
|
||||
|
||||
|
||||
def _translate_tools_to_gemini(tools: Any) -> List[Dict[str, Any]]:
|
||||
"""OpenAI tools[] -> Gemini tools[].functionDeclarations[]."""
|
||||
if not isinstance(tools, list) or not tools:
|
||||
return []
|
||||
declarations: List[Dict[str, Any]] = []
|
||||
for t in tools:
|
||||
if not isinstance(t, dict):
|
||||
continue
|
||||
fn = t.get("function") or {}
|
||||
if not isinstance(fn, dict):
|
||||
continue
|
||||
name = fn.get("name")
|
||||
if not name:
|
||||
continue
|
||||
decl = {"name": str(name)}
|
||||
if fn.get("description"):
|
||||
decl["description"] = str(fn["description"])
|
||||
params = fn.get("parameters")
|
||||
if isinstance(params, dict):
|
||||
decl["parameters"] = sanitize_gemini_tool_parameters(params)
|
||||
declarations.append(decl)
|
||||
if not declarations:
|
||||
return []
|
||||
return [{"functionDeclarations": declarations}]
|
||||
|
||||
|
||||
def _translate_tool_choice_to_gemini(tool_choice: Any) -> Optional[Dict[str, Any]]:
|
||||
"""OpenAI tool_choice -> Gemini toolConfig.functionCallingConfig."""
|
||||
if tool_choice is None:
|
||||
return None
|
||||
if isinstance(tool_choice, str):
|
||||
if tool_choice == "auto":
|
||||
return {"functionCallingConfig": {"mode": "AUTO"}}
|
||||
if tool_choice == "required":
|
||||
return {"functionCallingConfig": {"mode": "ANY"}}
|
||||
if tool_choice == "none":
|
||||
return {"functionCallingConfig": {"mode": "NONE"}}
|
||||
if isinstance(tool_choice, dict):
|
||||
fn = tool_choice.get("function") or {}
|
||||
name = fn.get("name")
|
||||
if name:
|
||||
return {
|
||||
"functionCallingConfig": {
|
||||
"mode": "ANY",
|
||||
"allowedFunctionNames": [str(name)],
|
||||
},
|
||||
}
|
||||
return None
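# Mapping sketch (the tool name "read_file" is hypothetical), mirroring the
# branches above:
_EXAMPLE_TOOL_CHOICE_MAPPING = {
    "auto": {"functionCallingConfig": {"mode": "AUTO"}},
    "required": {"functionCallingConfig": {"mode": "ANY"}},
    "none": {"functionCallingConfig": {"mode": "NONE"}},
}
# A targeted choice such as {"type": "function", "function": {"name": "read_file"}}
# maps to {"functionCallingConfig": {"mode": "ANY",
#                                    "allowedFunctionNames": ["read_file"]}};
# anything else falls through to None (Gemini's default behaviour).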
|
||||
|
||||
|
||||
def _normalize_thinking_config(config: Any) -> Optional[Dict[str, Any]]:
|
||||
"""Accept thinkingBudget / thinkingLevel / includeThoughts (+ snake_case)."""
|
||||
if not isinstance(config, dict) or not config:
|
||||
return None
|
||||
budget = config.get("thinkingBudget", config.get("thinking_budget"))
|
||||
level = config.get("thinkingLevel", config.get("thinking_level"))
|
||||
include = config.get("includeThoughts", config.get("include_thoughts"))
|
||||
normalized: Dict[str, Any] = {}
|
||||
if isinstance(budget, (int, float)):
|
||||
normalized["thinkingBudget"] = int(budget)
|
||||
if isinstance(level, str) and level.strip():
|
||||
normalized["thinkingLevel"] = level.strip().lower()
|
||||
if isinstance(include, bool):
|
||||
normalized["includeThoughts"] = include
|
||||
return normalized or None
|
||||
|
||||
|
||||
def build_gemini_request(
|
||||
*,
|
||||
messages: List[Dict[str, Any]],
|
||||
tools: Any = None,
|
||||
tool_choice: Any = None,
|
||||
temperature: Optional[float] = None,
|
||||
max_tokens: Optional[int] = None,
|
||||
top_p: Optional[float] = None,
|
||||
stop: Any = None,
|
||||
thinking_config: Any = None,
|
||||
) -> Dict[str, Any]:
|
||||
"""Build the inner Gemini request body (goes inside ``request`` wrapper)."""
|
||||
contents, system_instruction = _build_gemini_contents(messages)
|
||||
|
||||
body: Dict[str, Any] = {"contents": contents}
|
||||
if system_instruction is not None:
|
||||
body["systemInstruction"] = system_instruction
|
||||
|
||||
gemini_tools = _translate_tools_to_gemini(tools)
|
||||
if gemini_tools:
|
||||
body["tools"] = gemini_tools
|
||||
tool_cfg = _translate_tool_choice_to_gemini(tool_choice)
|
||||
if tool_cfg is not None:
|
||||
body["toolConfig"] = tool_cfg
|
||||
|
||||
generation_config: Dict[str, Any] = {}
|
||||
if isinstance(temperature, (int, float)):
|
||||
generation_config["temperature"] = float(temperature)
|
||||
if isinstance(max_tokens, int) and max_tokens > 0:
|
||||
generation_config["maxOutputTokens"] = max_tokens
|
||||
if isinstance(top_p, (int, float)):
|
||||
generation_config["topP"] = float(top_p)
|
||||
if isinstance(stop, str) and stop:
|
||||
generation_config["stopSequences"] = [stop]
|
||||
elif isinstance(stop, list) and stop:
|
||||
generation_config["stopSequences"] = [str(s) for s in stop if s]
|
||||
normalized_thinking = _normalize_thinking_config(thinking_config)
|
||||
if normalized_thinking:
|
||||
generation_config["thinkingConfig"] = normalized_thinking
|
||||
if generation_config:
|
||||
body["generationConfig"] = generation_config
|
||||
|
||||
return body
|
||||
|
||||
|
||||
def wrap_code_assist_request(
|
||||
*,
|
||||
project_id: str,
|
||||
model: str,
|
||||
inner_request: Dict[str, Any],
|
||||
user_prompt_id: Optional[str] = None,
|
||||
) -> Dict[str, Any]:
|
||||
"""Wrap the inner Gemini request in the Code Assist envelope."""
|
||||
return {
|
||||
"project": project_id,
|
||||
"model": model,
|
||||
"user_prompt_id": user_prompt_id or str(uuid.uuid4()),
|
||||
"request": inner_request,
|
||||
}
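# Minimal end-to-end sketch of the two builders composed; the project id is
# hypothetical and user_prompt_id is a random UUID. Both calls are pure
# (no network, no OAuth), so this is safe to evaluate:
_EXAMPLE_INNER_REQUEST = build_gemini_request(
    messages=[{"role": "user", "content": "hello"}],
    temperature=0.2,
)
_EXAMPLE_WRAPPED_REQUEST = wrap_code_assist_request(
    project_id="my-gcp-project",
    model="gemini-2.5-flash",
    inner_request=_EXAMPLE_INNER_REQUEST,
)
# _EXAMPLE_WRAPPED_REQUEST now looks like:
# {
#     "project": "my-gcp-project",
#     "model": "gemini-2.5-flash",
#     "user_prompt_id": "<uuid4>",
#     "request": {
#         "contents": [{"role": "user", "parts": [{"text": "hello"}]}],
#         "generationConfig": {"temperature": 0.2},
#     },
# }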
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Response translation: Gemini → OpenAI
|
||||
# =============================================================================
|
||||
|
||||
def _translate_gemini_response(
|
||||
resp: Dict[str, Any],
|
||||
model: str,
|
||||
) -> SimpleNamespace:
|
||||
"""Non-streaming Gemini response -> OpenAI-shaped SimpleNamespace.
|
||||
|
||||
Code Assist wraps the actual Gemini response inside ``response``, so we
|
||||
unwrap it first if present.
|
||||
"""
|
||||
inner = resp.get("response") if isinstance(resp.get("response"), dict) else resp
|
||||
|
||||
candidates = inner.get("candidates") or []
|
||||
if not isinstance(candidates, list) or not candidates:
|
||||
return _empty_response(model)
|
||||
|
||||
cand = candidates[0]
|
||||
content_obj = cand.get("content") if isinstance(cand, dict) else {}
|
||||
parts = content_obj.get("parts") if isinstance(content_obj, dict) else []
|
||||
|
||||
text_pieces: List[str] = []
|
||||
reasoning_pieces: List[str] = []
|
||||
tool_calls: List[SimpleNamespace] = []
|
||||
|
||||
for i, part in enumerate(parts or []):
|
||||
if not isinstance(part, dict):
|
||||
continue
|
||||
# Thought parts are the model's internal reasoning; surface them as
# reasoning rather than mixing them into the content.
|
||||
if part.get("thought") is True:
|
||||
if isinstance(part.get("text"), str):
|
||||
reasoning_pieces.append(part["text"])
|
||||
continue
|
||||
if isinstance(part.get("text"), str):
|
||||
text_pieces.append(part["text"])
|
||||
continue
|
||||
fc = part.get("functionCall")
|
||||
if isinstance(fc, dict) and fc.get("name"):
|
||||
try:
|
||||
args_str = json.dumps(fc.get("args") or {}, ensure_ascii=False)
|
||||
except (TypeError, ValueError):
|
||||
args_str = "{}"
|
||||
tool_calls.append(SimpleNamespace(
|
||||
id=f"call_{uuid.uuid4().hex[:12]}",
|
||||
type="function",
|
||||
index=i,
|
||||
function=SimpleNamespace(name=str(fc["name"]), arguments=args_str),
|
||||
))
|
||||
|
||||
finish_reason = "tool_calls" if tool_calls else _map_gemini_finish_reason(
|
||||
str(cand.get("finishReason") or "")
|
||||
)
|
||||
|
||||
usage_meta = inner.get("usageMetadata") or {}
|
||||
usage = SimpleNamespace(
|
||||
prompt_tokens=int(usage_meta.get("promptTokenCount") or 0),
|
||||
completion_tokens=int(usage_meta.get("candidatesTokenCount") or 0),
|
||||
total_tokens=int(usage_meta.get("totalTokenCount") or 0),
|
||||
prompt_tokens_details=SimpleNamespace(
|
||||
cached_tokens=int(usage_meta.get("cachedContentTokenCount") or 0),
|
||||
),
|
||||
)
|
||||
|
||||
message = SimpleNamespace(
|
||||
role="assistant",
|
||||
content="".join(text_pieces) if text_pieces else None,
|
||||
tool_calls=tool_calls or None,
|
||||
reasoning="".join(reasoning_pieces) or None,
|
||||
reasoning_content="".join(reasoning_pieces) or None,
|
||||
reasoning_details=None,
|
||||
)
|
||||
choice = SimpleNamespace(
|
||||
index=0,
|
||||
message=message,
|
||||
finish_reason=finish_reason,
|
||||
)
|
||||
return SimpleNamespace(
|
||||
id=f"chatcmpl-{uuid.uuid4().hex[:12]}",
|
||||
object="chat.completion",
|
||||
created=int(time.time()),
|
||||
model=model,
|
||||
choices=[choice],
|
||||
usage=usage,
|
||||
)
|
||||
|
||||
|
||||
def _empty_response(model: str) -> SimpleNamespace:
|
||||
message = SimpleNamespace(
|
||||
role="assistant", content="", tool_calls=None,
|
||||
reasoning=None, reasoning_content=None, reasoning_details=None,
|
||||
)
|
||||
choice = SimpleNamespace(index=0, message=message, finish_reason="stop")
|
||||
usage = SimpleNamespace(
|
||||
prompt_tokens=0, completion_tokens=0, total_tokens=0,
|
||||
prompt_tokens_details=SimpleNamespace(cached_tokens=0),
|
||||
)
|
||||
return SimpleNamespace(
|
||||
id=f"chatcmpl-{uuid.uuid4().hex[:12]}",
|
||||
object="chat.completion",
|
||||
created=int(time.time()),
|
||||
model=model,
|
||||
choices=[choice],
|
||||
usage=usage,
|
||||
)
|
||||
|
||||
|
||||
def _map_gemini_finish_reason(reason: str) -> str:
|
||||
mapping = {
|
||||
"STOP": "stop",
|
||||
"MAX_TOKENS": "length",
|
||||
"SAFETY": "content_filter",
|
||||
"RECITATION": "content_filter",
|
||||
"OTHER": "stop",
|
||||
}
|
||||
return mapping.get(reason.upper(), "stop")
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Streaming SSE iterator
|
||||
# =============================================================================
|
||||
|
||||
class _GeminiStreamChunk(SimpleNamespace):
|
||||
"""Mimics an OpenAI ChatCompletionChunk with .choices[0].delta."""
|
||||
pass
|
||||
|
||||
|
||||
def _make_stream_chunk(
|
||||
*,
|
||||
model: str,
|
||||
content: str = "",
|
||||
tool_call_delta: Optional[Dict[str, Any]] = None,
|
||||
finish_reason: Optional[str] = None,
|
||||
reasoning: str = "",
|
||||
) -> _GeminiStreamChunk:
|
||||
delta_kwargs: Dict[str, Any] = {"role": "assistant"}
|
||||
if content:
|
||||
delta_kwargs["content"] = content
|
||||
if tool_call_delta is not None:
|
||||
delta_kwargs["tool_calls"] = [SimpleNamespace(
|
||||
index=tool_call_delta.get("index", 0),
|
||||
id=tool_call_delta.get("id") or f"call_{uuid.uuid4().hex[:12]}",
|
||||
type="function",
|
||||
function=SimpleNamespace(
|
||||
name=tool_call_delta.get("name") or "",
|
||||
arguments=tool_call_delta.get("arguments") or "",
|
||||
),
|
||||
)]
|
||||
if reasoning:
|
||||
delta_kwargs["reasoning"] = reasoning
|
||||
delta_kwargs["reasoning_content"] = reasoning
|
||||
delta = SimpleNamespace(**delta_kwargs)
|
||||
choice = SimpleNamespace(index=0, delta=delta, finish_reason=finish_reason)
|
||||
return _GeminiStreamChunk(
|
||||
id=f"chatcmpl-{uuid.uuid4().hex[:12]}",
|
||||
object="chat.completion.chunk",
|
||||
created=int(time.time()),
|
||||
model=model,
|
||||
choices=[choice],
|
||||
usage=None,
|
||||
)
|
||||
|
||||
|
||||
def _iter_sse_events(response: httpx.Response) -> Iterator[Dict[str, Any]]:
|
||||
"""Parse Server-Sent Events from an httpx streaming response."""
|
||||
buffer = ""
|
||||
for chunk in response.iter_text():
|
||||
if not chunk:
|
||||
continue
|
||||
buffer += chunk
|
||||
while "\n" in buffer:
|
||||
line, buffer = buffer.split("\n", 1)
|
||||
line = line.rstrip("\r")
|
||||
if not line:
|
||||
continue
|
||||
if line.startswith("data: "):
|
||||
data = line[6:]
|
||||
if data == "[DONE]":
|
||||
return
|
||||
try:
|
||||
yield json.loads(data)
|
||||
except json.JSONDecodeError:
|
||||
logger.debug("Non-JSON SSE line: %s", data[:200])
|
||||
|
||||
|
||||
def _translate_stream_event(
|
||||
event: Dict[str, Any],
|
||||
model: str,
|
||||
tool_call_counter: List[int],
|
||||
) -> List[_GeminiStreamChunk]:
|
||||
"""Unwrap Code Assist envelope and emit OpenAI-shaped chunk(s).
|
||||
|
||||
``tool_call_counter`` is a single-element list used as a mutable counter
|
||||
across events in the same stream. Each ``functionCall`` part gets a
|
||||
fresh, unique OpenAI ``index`` — keying by function name would collide
|
||||
whenever the model issues parallel calls to the same tool (e.g. reading
|
||||
three files in one turn).
|
||||
"""
|
||||
inner = event.get("response") if isinstance(event.get("response"), dict) else event
|
||||
candidates = inner.get("candidates") or []
|
||||
if not candidates:
|
||||
return []
|
||||
cand = candidates[0]
|
||||
if not isinstance(cand, dict):
|
||||
return []
|
||||
|
||||
chunks: List[_GeminiStreamChunk] = []
|
||||
|
||||
content = cand.get("content") or {}
|
||||
parts = content.get("parts") if isinstance(content, dict) else []
|
||||
for part in parts or []:
|
||||
if not isinstance(part, dict):
|
||||
continue
|
||||
if part.get("thought") is True and isinstance(part.get("text"), str):
|
||||
chunks.append(_make_stream_chunk(
|
||||
model=model, reasoning=part["text"],
|
||||
))
|
||||
continue
|
||||
if isinstance(part.get("text"), str) and part["text"]:
|
||||
chunks.append(_make_stream_chunk(model=model, content=part["text"]))
|
||||
fc = part.get("functionCall")
|
||||
if isinstance(fc, dict) and fc.get("name"):
|
||||
name = str(fc["name"])
|
||||
idx = tool_call_counter[0]
|
||||
tool_call_counter[0] += 1
|
||||
try:
|
||||
args_str = json.dumps(fc.get("args") or {}, ensure_ascii=False)
|
||||
except (TypeError, ValueError):
|
||||
args_str = "{}"
|
||||
chunks.append(_make_stream_chunk(
|
||||
model=model,
|
||||
tool_call_delta={
|
||||
"index": idx,
|
||||
"name": name,
|
||||
"arguments": args_str,
|
||||
},
|
||||
))
|
||||
|
||||
finish_reason_raw = str(cand.get("finishReason") or "")
|
||||
if finish_reason_raw:
|
||||
mapped = _map_gemini_finish_reason(finish_reason_raw)
|
||||
if tool_call_counter[0] > 0:
|
||||
mapped = "tool_calls"
|
||||
chunks.append(_make_stream_chunk(model=model, finish_reason=mapped))
|
||||
return chunks
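# Parallel-call sketch (hypothetical event): two functionCall parts for the
# same tool in a single event receive distinct OpenAI indices from the shared
# counter, so downstream accumulation never merges them:
_EXAMPLE_PARALLEL_CALL_EVENT = {
    "response": {
        "candidates": [{
            "content": {"parts": [
                {"functionCall": {"name": "read_file", "args": {"path": "a.py"}}},
                {"functionCall": {"name": "read_file", "args": {"path": "b.py"}}},
            ]},
        }],
    },
}
# With tool_call_counter = [0], _translate_stream_event(_EXAMPLE_PARALLEL_CALL_EVENT,
# "gemini-2.5-flash", tool_call_counter) would emit two tool-call delta chunks
# with index 0 and index 1, leaving the counter at [2].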
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# GeminiCloudCodeClient — OpenAI-compatible facade
|
||||
# =============================================================================
|
||||
|
||||
MARKER_BASE_URL = "cloudcode-pa://google"
|
||||
|
||||
|
||||
class _GeminiChatCompletions:
|
||||
def __init__(self, client: "GeminiCloudCodeClient"):
|
||||
self._client = client
|
||||
|
||||
def create(self, **kwargs: Any) -> Any:
|
||||
return self._client._create_chat_completion(**kwargs)
|
||||
|
||||
|
||||
class _GeminiChatNamespace:
|
||||
def __init__(self, client: "GeminiCloudCodeClient"):
|
||||
self.completions = _GeminiChatCompletions(client)
|
||||
|
||||
|
||||
class GeminiCloudCodeClient:
|
||||
"""Minimal OpenAI-SDK-compatible facade over Code Assist v1internal."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
api_key: Optional[str] = None,
|
||||
base_url: Optional[str] = None,
|
||||
default_headers: Optional[Dict[str, str]] = None,
|
||||
project_id: str = "",
|
||||
**_: Any,
|
||||
):
|
||||
# `api_key` here is a dummy — real auth is the OAuth access token
|
||||
# fetched on every call via agent.google_oauth.get_valid_access_token().
|
||||
# We accept the kwarg for openai.OpenAI interface parity.
|
||||
self.api_key = api_key or "google-oauth"
|
||||
self.base_url = base_url or MARKER_BASE_URL
|
||||
self._default_headers = dict(default_headers or {})
|
||||
self._configured_project_id = project_id
|
||||
self._project_context: Optional[ProjectContext] = None
|
||||
self._project_context_lock = False # simple single-thread guard
|
||||
self.chat = _GeminiChatNamespace(self)
|
||||
self.is_closed = False
|
||||
self._http = httpx.Client(timeout=httpx.Timeout(connect=15.0, read=600.0, write=30.0, pool=30.0))
|
||||
|
||||
def close(self) -> None:
|
||||
self.is_closed = True
|
||||
try:
|
||||
self._http.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Support context-manager usage so the client can be closed like the OpenAI SDK client.
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
self.close()
|
||||
|
||||
def _ensure_project_context(self, access_token: str, model: str) -> ProjectContext:
|
||||
"""Lazily resolve and cache the project context for this client."""
|
||||
if self._project_context is not None:
|
||||
return self._project_context
|
||||
|
||||
env_project = google_oauth.resolve_project_id_from_env()
|
||||
creds = google_oauth.load_credentials()
|
||||
stored_project = creds.project_id if creds else ""
|
||||
|
||||
# Prefer what's already baked into the creds
|
||||
if stored_project:
|
||||
self._project_context = ProjectContext(
|
||||
project_id=stored_project,
|
||||
managed_project_id=creds.managed_project_id if creds else "",
|
||||
tier_id="",
|
||||
source="stored",
|
||||
)
|
||||
return self._project_context
|
||||
|
||||
ctx = resolve_project_context(
|
||||
access_token,
|
||||
configured_project_id=self._configured_project_id,
|
||||
env_project_id=env_project,
|
||||
user_agent_model=model,
|
||||
)
|
||||
# Persist discovered project back to the creds file so the next
|
||||
# session doesn't re-run the discovery.
|
||||
if ctx.project_id or ctx.managed_project_id:
|
||||
google_oauth.update_project_ids(
|
||||
project_id=ctx.project_id,
|
||||
managed_project_id=ctx.managed_project_id,
|
||||
)
|
||||
self._project_context = ctx
|
||||
return ctx
|
||||
|
||||
def _create_chat_completion(
|
||||
self,
|
||||
*,
|
||||
model: str = "gemini-2.5-flash",
|
||||
messages: Optional[List[Dict[str, Any]]] = None,
|
||||
stream: bool = False,
|
||||
tools: Any = None,
|
||||
tool_choice: Any = None,
|
||||
temperature: Optional[float] = None,
|
||||
max_tokens: Optional[int] = None,
|
||||
top_p: Optional[float] = None,
|
||||
stop: Any = None,
|
||||
extra_body: Optional[Dict[str, Any]] = None,
|
||||
timeout: Any = None,
|
||||
**_: Any,
|
||||
) -> Any:
|
||||
access_token = google_oauth.get_valid_access_token()
|
||||
ctx = self._ensure_project_context(access_token, model)
|
||||
|
||||
thinking_config = None
|
||||
if isinstance(extra_body, dict):
|
||||
thinking_config = extra_body.get("thinking_config") or extra_body.get("thinkingConfig")
|
||||
|
||||
inner = build_gemini_request(
|
||||
messages=messages or [],
|
||||
tools=tools,
|
||||
tool_choice=tool_choice,
|
||||
temperature=temperature,
|
||||
max_tokens=max_tokens,
|
||||
top_p=top_p,
|
||||
stop=stop,
|
||||
thinking_config=thinking_config,
|
||||
)
|
||||
wrapped = wrap_code_assist_request(
|
||||
project_id=ctx.project_id,
|
||||
model=model,
|
||||
inner_request=inner,
|
||||
)
|
||||
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json",
|
||||
"Authorization": f"Bearer {access_token}",
|
||||
"User-Agent": "hermes-agent (gemini-cli-compat)",
|
||||
"X-Goog-Api-Client": "gl-python/hermes",
|
||||
"x-activity-request-id": str(uuid.uuid4()),
|
||||
}
|
||||
headers.update(self._default_headers)
|
||||
|
||||
if stream:
|
||||
return self._stream_completion(model=model, wrapped=wrapped, headers=headers)
|
||||
|
||||
url = f"{CODE_ASSIST_ENDPOINT}/v1internal:generateContent"
|
||||
response = self._http.post(url, json=wrapped, headers=headers)
|
||||
if response.status_code != 200:
|
||||
raise _gemini_http_error(response)
|
||||
try:
|
||||
payload = response.json()
|
||||
except ValueError as exc:
|
||||
raise CodeAssistError(
|
||||
f"Invalid JSON from Code Assist: {exc}",
|
||||
code="code_assist_invalid_json",
|
||||
) from exc
|
||||
return _translate_gemini_response(payload, model=model)
|
||||
|
||||
def _stream_completion(
|
||||
self,
|
||||
*,
|
||||
model: str,
|
||||
wrapped: Dict[str, Any],
|
||||
headers: Dict[str, str],
|
||||
) -> Iterator[_GeminiStreamChunk]:
|
||||
"""Generator that yields OpenAI-shaped streaming chunks."""
|
||||
url = f"{CODE_ASSIST_ENDPOINT}/v1internal:streamGenerateContent?alt=sse"
|
||||
stream_headers = dict(headers)
|
||||
stream_headers["Accept"] = "text/event-stream"
|
||||
|
||||
def _generator() -> Iterator[_GeminiStreamChunk]:
|
||||
try:
|
||||
with self._http.stream("POST", url, json=wrapped, headers=stream_headers) as response:
|
||||
if response.status_code != 200:
|
||||
# Materialize error body for better diagnostics
|
||||
response.read()
|
||||
raise _gemini_http_error(response)
|
||||
tool_call_counter: List[int] = [0]
|
||||
for event in _iter_sse_events(response):
|
||||
for chunk in _translate_stream_event(event, model, tool_call_counter):
|
||||
yield chunk
|
||||
except httpx.HTTPError as exc:
|
||||
raise CodeAssistError(
|
||||
f"Streaming request failed: {exc}",
|
||||
code="code_assist_stream_error",
|
||||
) from exc
|
||||
|
||||
return _generator()
|
||||
|
||||
|
||||
def _gemini_http_error(response: httpx.Response) -> CodeAssistError:
|
||||
"""Translate an httpx response into a CodeAssistError with rich metadata.
|
||||
|
||||
Parses Google's error envelope (``{"error": {"code", "message", "status",
|
||||
"details": [...]}}``) so the agent's error classifier can reason about
|
||||
the failure — ``status_code`` enables the rate_limit / auth classification
|
||||
paths, and ``response`` lets the main loop honor ``Retry-After`` just
|
||||
like it does for OpenAI SDK exceptions.
|
||||
|
||||
Also lifts a few recognizable Google conditions into human-readable
|
||||
messages so the user sees something better than a 500-char JSON dump:
|
||||
|
||||
MODEL_CAPACITY_EXHAUSTED → "Gemini model capacity exhausted for
|
||||
<model>. This is a Google-side throttle..."
|
||||
RESOURCE_EXHAUSTED w/o reason → quota-style message
|
||||
404 → "Model <name> not found at cloudcode-pa..."
|
||||
"""
|
||||
status = response.status_code
|
||||
|
||||
# Parse the body once, surviving any weird encodings.
|
||||
body_text = ""
|
||||
body_json: Dict[str, Any] = {}
|
||||
try:
|
||||
body_text = response.text
|
||||
except Exception:
|
||||
body_text = ""
|
||||
if body_text:
|
||||
try:
|
||||
parsed = json.loads(body_text)
|
||||
if isinstance(parsed, dict):
|
||||
body_json = parsed
|
||||
except (ValueError, TypeError):
|
||||
body_json = {}
|
||||
|
||||
# Dig into Google's error envelope. Shape is:
|
||||
# {"error": {"code": 429, "message": "...", "status": "RESOURCE_EXHAUSTED",
|
||||
# "details": [{"@type": ".../ErrorInfo", "reason": "MODEL_CAPACITY_EXHAUSTED",
|
||||
# "metadata": {...}},
|
||||
# {"@type": ".../RetryInfo", "retryDelay": "30s"}]}}
|
||||
err_obj = body_json.get("error") if isinstance(body_json, dict) else None
|
||||
if not isinstance(err_obj, dict):
|
||||
err_obj = {}
|
||||
err_status = str(err_obj.get("status") or "").strip()
|
||||
err_message = str(err_obj.get("message") or "").strip()
|
||||
_raw_details = err_obj.get("details")
|
||||
err_details_list = _raw_details if isinstance(_raw_details, list) else []
|
||||
|
||||
# Extract google.rpc.ErrorInfo reason + metadata. There may be more
|
||||
# than one ErrorInfo (rare), so we pick the first one with a reason.
|
||||
error_reason = ""
|
||||
error_metadata: Dict[str, Any] = {}
|
||||
retry_delay_seconds: Optional[float] = None
|
||||
for detail in err_details_list:
|
||||
if not isinstance(detail, dict):
|
||||
continue
|
||||
type_url = str(detail.get("@type") or "")
|
||||
if not error_reason and type_url.endswith("/google.rpc.ErrorInfo"):
|
||||
reason = detail.get("reason")
|
||||
if isinstance(reason, str) and reason:
|
||||
error_reason = reason
|
||||
md = detail.get("metadata")
|
||||
if isinstance(md, dict):
|
||||
error_metadata = md
|
||||
elif retry_delay_seconds is None and type_url.endswith("/google.rpc.RetryInfo"):
|
||||
# retryDelay is a google.protobuf.Duration string like "30s" or "1.5s".
|
||||
delay_raw = detail.get("retryDelay")
|
||||
if isinstance(delay_raw, str) and delay_raw.endswith("s"):
|
||||
try:
|
||||
retry_delay_seconds = float(delay_raw[:-1])
|
||||
except ValueError:
|
||||
pass
|
||||
elif isinstance(delay_raw, (int, float)):
|
||||
retry_delay_seconds = float(delay_raw)
|
||||
|
||||
# Fall back to the Retry-After header if the body didn't include RetryInfo.
|
||||
if retry_delay_seconds is None:
|
||||
try:
|
||||
header_val = response.headers.get("Retry-After") or response.headers.get("retry-after")
|
||||
except Exception:
|
||||
header_val = None
|
||||
if header_val:
|
||||
try:
|
||||
retry_delay_seconds = float(header_val)
|
||||
except (TypeError, ValueError):
|
||||
retry_delay_seconds = None
|
||||
|
||||
# Classify the error code. ``code_assist_rate_limited`` stays the default
|
||||
# for 429s; a more specific reason tag helps downstream callers (e.g. tests,
|
||||
# logs) without changing the rate_limit classification path.
|
||||
code = f"code_assist_http_{status}"
|
||||
if status == 401:
|
||||
code = "code_assist_unauthorized"
|
||||
elif status == 429:
|
||||
code = "code_assist_rate_limited"
|
||||
if error_reason == "MODEL_CAPACITY_EXHAUSTED":
|
||||
code = "code_assist_capacity_exhausted"
|
||||
|
||||
# Build a human-readable message. Keep the status + a raw-body tail for
|
||||
# debugging, but lead with a friendlier summary when we recognize the
|
||||
# Google signal.
|
||||
model_hint = ""
|
||||
if isinstance(error_metadata, dict):
|
||||
model_hint = str(error_metadata.get("model") or error_metadata.get("modelId") or "").strip()
|
||||
|
||||
if status == 429 and error_reason == "MODEL_CAPACITY_EXHAUSTED":
|
||||
target = model_hint or "this Gemini model"
|
||||
message = (
|
||||
f"Gemini capacity exhausted for {target} (Google-side throttle, "
|
||||
f"not a Hermes issue). Try a different Gemini model or set a "
|
||||
f"fallback_providers entry to a non-Gemini provider."
|
||||
)
|
||||
if retry_delay_seconds is not None:
|
||||
message += f" Google suggests retrying in {retry_delay_seconds:g}s."
|
||||
elif status == 429 and err_status == "RESOURCE_EXHAUSTED":
|
||||
message = (
|
||||
f"Gemini quota exhausted ({err_message or 'RESOURCE_EXHAUSTED'}). "
|
||||
f"Check /gquota for remaining daily requests."
|
||||
)
|
||||
if retry_delay_seconds is not None:
|
||||
message += f" Retry suggested in {retry_delay_seconds:g}s."
|
||||
elif status == 404:
|
||||
# Google returns 404 when a model has been retired or renamed.
|
||||
target = model_hint or (err_message or "model")
|
||||
message = (
|
||||
f"Code Assist 404: {target} is not available at "
|
||||
f"cloudcode-pa.googleapis.com. It may have been renamed or "
|
||||
f"retired. Check hermes_cli/models.py for the current list."
|
||||
)
|
||||
elif err_message:
|
||||
# Generic fallback with the parsed message.
|
||||
message = f"Code Assist HTTP {status} ({err_status or 'error'}): {err_message}"
|
||||
else:
|
||||
# Last-ditch fallback — raw body snippet.
|
||||
message = f"Code Assist returned HTTP {status}: {body_text[:500]}"
|
||||
|
||||
return CodeAssistError(
|
||||
message,
|
||||
code=code,
|
||||
status_code=status,
|
||||
response=response,
|
||||
retry_after=retry_delay_seconds,
|
||||
details={
|
||||
"status": err_status,
|
||||
"reason": error_reason,
|
||||
"metadata": error_metadata,
|
||||
"message": err_message,
|
||||
},
|
||||
)
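# Worked example (hypothetical payload) of the envelope parsing above. This is
# only a restatement of the path already implemented, not an extra code path:
_EXAMPLE_CODE_ASSIST_ERROR = {
    "error": {
        "code": 429,
        "message": "Quota exceeded for gemini-2.5-pro.",
        "status": "RESOURCE_EXHAUSTED",
        "details": [
            {
                "@type": "type.googleapis.com/google.rpc.ErrorInfo",
                "reason": "MODEL_CAPACITY_EXHAUSTED",
                "metadata": {"model": "gemini-2.5-pro"},
            },
            {
                "@type": "type.googleapis.com/google.rpc.RetryInfo",
                "retryDelay": "30s",
            },
        ],
    },
}
# Walking ``details`` as above extracts:
#   error_reason        -> "MODEL_CAPACITY_EXHAUSTED"
#   error_metadata      -> {"model": "gemini-2.5-pro"}
#   retry_delay_seconds -> 30.0  (float("30s"[:-1]))
# so the resulting CodeAssistError carries code="code_assist_capacity_exhausted",
# retry_after=30.0, and the friendlier capacity-exhausted message.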
|
||||
@@ -1,965 +0,0 @@
|
||||
"""OpenAI-compatible facade over Google AI Studio's native Gemini API.
|
||||
|
||||
Hermes keeps ``api_mode='chat_completions'`` for the ``gemini`` provider so the
|
||||
main agent loop can keep using its existing OpenAI-shaped message flow.
|
||||
This adapter is the transport shim that converts those OpenAI-style
|
||||
``messages[]`` / ``tools[]`` requests into Gemini's native
|
||||
``models/{model}:generateContent`` schema and converts the responses back.
|
||||
|
||||
Why this exists
|
||||
---------------
|
||||
Google's OpenAI-compatible endpoint has been brittle for Hermes's multi-turn
|
||||
agent/tool loop (auth churn, tool-call replay quirks, thought-signature
|
||||
requirements). The native Gemini API is the canonical path and avoids the
|
||||
OpenAI-compat layer entirely.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
import uuid
|
||||
from types import SimpleNamespace
|
||||
from typing import Any, Dict, Iterator, List, Optional
|
||||
|
||||
import httpx
|
||||
|
||||
from agent.gemini_schema import sanitize_gemini_tool_parameters
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DEFAULT_GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta"
|
||||
|
||||
|
||||
def is_native_gemini_base_url(base_url: str) -> bool:
|
||||
"""Return True when the endpoint speaks Gemini's native REST API."""
|
||||
normalized = str(base_url or "").strip().rstrip("/").lower()
|
||||
if not normalized:
|
||||
return False
|
||||
if "generativelanguage.googleapis.com" not in normalized:
|
||||
return False
|
||||
return not normalized.endswith("/openai")
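# Classification sketch (example URLs; each evaluates per the rules above):
#   is_native_gemini_base_url("https://generativelanguage.googleapis.com/v1beta")         -> True
#   is_native_gemini_base_url("https://generativelanguage.googleapis.com/v1beta/openai/") -> False
#   is_native_gemini_base_url("https://api.openai.com/v1")                                -> False
#   is_native_gemini_base_url("")                                                         -> False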
|
||||
|
||||
|
||||
def probe_gemini_tier(
|
||||
api_key: str,
|
||||
base_url: str = DEFAULT_GEMINI_BASE_URL,
|
||||
*,
|
||||
model: str = "gemini-2.5-flash",
|
||||
timeout: float = 10.0,
|
||||
) -> str:
|
||||
"""Probe a Google AI Studio API key and return its tier.
|
||||
|
||||
Returns one of:
|
||||
|
||||
- ``"free"`` -- key is on the free tier (unusable with Hermes)
|
||||
- ``"paid"`` -- key is on a paid tier
|
||||
- ``"unknown"`` -- probe failed; callers should proceed without blocking.
|
||||
"""
|
||||
key = (api_key or "").strip()
|
||||
if not key:
|
||||
return "unknown"
|
||||
|
||||
normalized_base = str(base_url or DEFAULT_GEMINI_BASE_URL).strip().rstrip("/")
|
||||
if not normalized_base:
|
||||
normalized_base = DEFAULT_GEMINI_BASE_URL
|
||||
if normalized_base.lower().endswith("/openai"):
|
||||
normalized_base = normalized_base[: -len("/openai")]
|
||||
|
||||
url = f"{normalized_base}/models/{model}:generateContent"
|
||||
payload = {
|
||||
"contents": [{"role": "user", "parts": [{"text": "hi"}]}],
|
||||
"generationConfig": {"maxOutputTokens": 1},
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client(timeout=timeout) as client:
|
||||
resp = client.post(
|
||||
url,
|
||||
params={"key": key},
|
||||
json=payload,
|
||||
headers={"Content-Type": "application/json"},
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.debug("probe_gemini_tier: network error: %s", exc)
|
||||
return "unknown"
|
||||
|
||||
headers_lower = {k.lower(): v for k, v in resp.headers.items()}
|
||||
rpd_header = headers_lower.get("x-ratelimit-limit-requests-per-day")
|
||||
if rpd_header:
|
||||
try:
|
||||
rpd_val = int(rpd_header)
|
||||
except (TypeError, ValueError):
|
||||
rpd_val = None
|
||||
# Published free-tier daily caps (Dec 2025):
|
||||
# gemini-2.5-pro: 100, gemini-2.5-flash: 250, flash-lite: 1000
|
||||
# Tier 1 starts at ~1500+ for Flash. We treat <= 1000 as free.
|
||||
if rpd_val is not None and rpd_val <= 1000:
|
||||
return "free"
|
||||
if rpd_val is not None and rpd_val > 1000:
|
||||
return "paid"
|
||||
|
||||
if resp.status_code == 429:
|
||||
body_text = ""
|
||||
try:
|
||||
body_text = resp.text or ""
|
||||
except Exception:
|
||||
body_text = ""
|
||||
if "free_tier" in body_text.lower():
|
||||
return "free"
|
||||
return "paid"
|
||||
|
||||
if 200 <= resp.status_code < 300:
|
||||
return "paid"
|
||||
|
||||
return "unknown"
|
||||
|
||||
|
||||
def is_free_tier_quota_error(error_message: str) -> bool:
|
||||
"""Return True when a Gemini 429 message indicates free-tier exhaustion."""
|
||||
if not error_message:
|
||||
return False
|
||||
return "free_tier" in error_message.lower()
|
||||
|
||||
|
||||
_FREE_TIER_GUIDANCE = (
|
||||
"\n\nYour Google API key is on the free tier (<= 250 requests/day for "
|
||||
"gemini-2.5-flash). Hermes typically makes 3-10 API calls per user turn, "
|
||||
"so the free tier is exhausted in a handful of messages and cannot sustain "
|
||||
"an agent session. Enable billing on your Google Cloud project and "
|
||||
"regenerate the key in a billing-enabled project: "
|
||||
"https://aistudio.google.com/apikey"
|
||||
)
|
||||
|
||||
|
||||
class GeminiAPIError(Exception):
|
||||
"""Error shape compatible with Hermes retry/error classification."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
message: str,
|
||||
*,
|
||||
code: str = "gemini_api_error",
|
||||
status_code: Optional[int] = None,
|
||||
response: Optional[httpx.Response] = None,
|
||||
retry_after: Optional[float] = None,
|
||||
details: Optional[Dict[str, Any]] = None,
|
||||
) -> None:
|
||||
super().__init__(message)
|
||||
self.code = code
|
||||
self.status_code = status_code
|
||||
self.response = response
|
||||
self.retry_after = retry_after
|
||||
self.details = details or {}
|
||||
|
||||
|
||||
def _coerce_content_to_text(content: Any) -> str:
|
||||
if content is None:
|
||||
return ""
|
||||
if isinstance(content, str):
|
||||
return content
|
||||
if isinstance(content, list):
|
||||
pieces: List[str] = []
|
||||
for part in content:
|
||||
if isinstance(part, str):
|
||||
pieces.append(part)
|
||||
elif isinstance(part, dict) and part.get("type") == "text":
|
||||
text = part.get("text")
|
||||
if isinstance(text, str):
|
||||
pieces.append(text)
|
||||
return "\n".join(pieces)
|
||||
return str(content)
|
||||
|
||||
|
||||
def _extract_multimodal_parts(content: Any) -> List[Dict[str, Any]]:
|
||||
if not isinstance(content, list):
|
||||
text = _coerce_content_to_text(content)
|
||||
return [{"text": text}] if text else []
|
||||
|
||||
parts: List[Dict[str, Any]] = []
|
||||
for item in content:
|
||||
if isinstance(item, str):
|
||||
parts.append({"text": item})
|
||||
continue
|
||||
if not isinstance(item, dict):
|
||||
continue
|
||||
ptype = item.get("type")
|
||||
if ptype == "text":
|
||||
text = item.get("text")
|
||||
if isinstance(text, str) and text:
|
||||
parts.append({"text": text})
|
||||
elif ptype == "image_url":
|
||||
url = ((item.get("image_url") or {}).get("url") or "")
|
||||
if not isinstance(url, str) or not url.startswith("data:"):
|
||||
continue
|
||||
try:
|
||||
header, encoded = url.split(",", 1)
|
||||
mime = header.split(":", 1)[1].split(";", 1)[0]
|
||||
raw = base64.b64decode(encoded)
|
||||
except Exception:
|
||||
continue
|
||||
parts.append(
|
||||
{
|
||||
"inlineData": {
|
||||
"mimeType": mime,
|
||||
"data": base64.b64encode(raw).decode("ascii"),
|
||||
}
|
||||
}
|
||||
)
|
||||
return parts
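# Round-trip sketch (the PNG bytes are a placeholder, not a real image) using
# the module's ``base64`` import: an OpenAI data-URL image part becomes a
# Gemini inlineData part, while plain text parts pass through as {"text": ...}.
_EXAMPLE_IMAGE_BYTES = b"\x89PNG\r\n\x1a\n"
_EXAMPLE_IMAGE_B64 = base64.b64encode(_EXAMPLE_IMAGE_BYTES).decode("ascii")
_EXAMPLE_MULTIMODAL_CONTENT = [
    {"type": "text", "text": "What is in this image?"},
    {"type": "image_url", "image_url": {"url": "data:image/png;base64," + _EXAMPLE_IMAGE_B64}},
]
# _extract_multimodal_parts(_EXAMPLE_MULTIMODAL_CONTENT) would yield:
# [
#     {"text": "What is in this image?"},
#     {"inlineData": {"mimeType": "image/png", "data": _EXAMPLE_IMAGE_B64}},
# ]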
|
||||
|
||||
|
||||
def _tool_call_extra_signature(tool_call: Dict[str, Any]) -> Optional[str]:
|
||||
extra = tool_call.get("extra_content") or {}
|
||||
if not isinstance(extra, dict):
|
||||
return None
|
||||
google = extra.get("google") or extra.get("thought_signature")
|
||||
if isinstance(google, dict):
|
||||
sig = google.get("thought_signature") or google.get("thoughtSignature")
|
||||
return str(sig) if isinstance(sig, str) and sig else None
|
||||
if isinstance(google, str) and google:
|
||||
return google
|
||||
return None
|
||||
|
||||
|
||||
def _translate_tool_call_to_gemini(tool_call: Dict[str, Any]) -> Dict[str, Any]:
|
||||
fn = tool_call.get("function") or {}
|
||||
args_raw = fn.get("arguments", "")
|
||||
try:
|
||||
args = json.loads(args_raw) if isinstance(args_raw, str) and args_raw else {}
|
||||
except json.JSONDecodeError:
|
||||
args = {"_raw": args_raw}
|
||||
if not isinstance(args, dict):
|
||||
args = {"_value": args}
|
||||
|
||||
part: Dict[str, Any] = {
|
||||
"functionCall": {
|
||||
"name": str(fn.get("name") or ""),
|
||||
"args": args,
|
||||
}
|
||||
}
|
||||
thought_signature = _tool_call_extra_signature(tool_call)
|
||||
if thought_signature:
|
||||
part["thoughtSignature"] = thought_signature
|
||||
return part
|
||||
|
||||
|
||||
def _translate_tool_result_to_gemini(
|
||||
message: Dict[str, Any],
|
||||
tool_name_by_call_id: Optional[Dict[str, str]] = None,
|
||||
) -> Dict[str, Any]:
|
||||
tool_name_by_call_id = tool_name_by_call_id or {}
|
||||
tool_call_id = str(message.get("tool_call_id") or "")
|
||||
name = str(
|
||||
message.get("name")
|
||||
or tool_name_by_call_id.get(tool_call_id)
|
||||
or tool_call_id
|
||||
or "tool"
|
||||
)
|
||||
content = _coerce_content_to_text(message.get("content"))
|
||||
try:
|
||||
parsed = json.loads(content) if content.strip().startswith(("{", "[")) else None
|
||||
except json.JSONDecodeError:
|
||||
parsed = None
|
||||
response = parsed if isinstance(parsed, dict) else {"output": content}
|
||||
return {
|
||||
"functionResponse": {
|
||||
"name": name,
|
||||
"response": response,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def _build_gemini_contents(messages: List[Dict[str, Any]]) -> tuple[List[Dict[str, Any]], Optional[Dict[str, Any]]]:
|
||||
system_text_parts: List[str] = []
|
||||
contents: List[Dict[str, Any]] = []
|
||||
tool_name_by_call_id: Dict[str, str] = {}
|
||||
|
||||
for msg in messages:
|
||||
if not isinstance(msg, dict):
|
||||
continue
|
||||
role = str(msg.get("role") or "user")
|
||||
|
||||
if role == "system":
|
||||
system_text_parts.append(_coerce_content_to_text(msg.get("content")))
|
||||
continue
|
||||
|
||||
if role in {"tool", "function"}:
|
||||
contents.append(
|
||||
{
|
||||
"role": "user",
|
||||
"parts": [
|
||||
_translate_tool_result_to_gemini(
|
||||
msg,
|
||||
tool_name_by_call_id=tool_name_by_call_id,
|
||||
)
|
||||
],
|
||||
}
|
||||
)
|
||||
continue
|
||||
|
||||
gemini_role = "model" if role == "assistant" else "user"
|
||||
parts: List[Dict[str, Any]] = []
|
||||
|
||||
content_parts = _extract_multimodal_parts(msg.get("content"))
|
||||
parts.extend(content_parts)
|
||||
|
||||
tool_calls = msg.get("tool_calls") or []
|
||||
if isinstance(tool_calls, list):
|
||||
for tool_call in tool_calls:
|
||||
if isinstance(tool_call, dict):
|
||||
tool_call_id = str(tool_call.get("id") or tool_call.get("call_id") or "")
|
||||
tool_name = str(((tool_call.get("function") or {}).get("name") or ""))
|
||||
if tool_call_id and tool_name:
|
||||
tool_name_by_call_id[tool_call_id] = tool_name
|
||||
parts.append(_translate_tool_call_to_gemini(tool_call))
|
||||
|
||||
if parts:
|
||||
contents.append({"role": gemini_role, "parts": parts})
|
||||
|
||||
system_instruction = None
|
||||
joined_system = "\n".join(part for part in system_text_parts if part).strip()
|
||||
if joined_system:
|
||||
system_instruction = {"parts": [{"text": joined_system}]}
|
||||
return contents, system_instruction
|
||||
|
||||
|
||||
def _translate_tools_to_gemini(tools: Any) -> List[Dict[str, Any]]:
|
||||
if not isinstance(tools, list):
|
||||
return []
|
||||
declarations: List[Dict[str, Any]] = []
|
||||
for tool in tools:
|
||||
if not isinstance(tool, dict):
|
||||
continue
|
||||
fn = tool.get("function") or {}
|
||||
if not isinstance(fn, dict):
|
||||
continue
|
||||
name = fn.get("name")
|
||||
if not isinstance(name, str) or not name:
|
||||
continue
|
||||
decl: Dict[str, Any] = {"name": name}
|
||||
description = fn.get("description")
|
||||
if isinstance(description, str) and description:
|
||||
decl["description"] = description
|
||||
parameters = fn.get("parameters")
|
||||
if isinstance(parameters, dict):
|
||||
decl["parameters"] = sanitize_gemini_tool_parameters(parameters)
|
||||
declarations.append(decl)
|
||||
return [{"functionDeclarations": declarations}] if declarations else []
|
||||
|
||||
|
||||
def _translate_tool_choice_to_gemini(tool_choice: Any) -> Optional[Dict[str, Any]]:
|
||||
if tool_choice is None:
|
||||
return None
|
||||
if isinstance(tool_choice, str):
|
||||
if tool_choice == "auto":
|
||||
return {"functionCallingConfig": {"mode": "AUTO"}}
|
||||
if tool_choice == "required":
|
||||
return {"functionCallingConfig": {"mode": "ANY"}}
|
||||
if tool_choice == "none":
|
||||
return {"functionCallingConfig": {"mode": "NONE"}}
|
||||
if isinstance(tool_choice, dict):
|
||||
fn = tool_choice.get("function") or {}
|
||||
name = fn.get("name")
|
||||
if isinstance(name, str) and name:
|
||||
return {"functionCallingConfig": {"mode": "ANY", "allowedFunctionNames": [name]}}
|
||||
return None
|
||||
|
||||
|
||||
def _normalize_thinking_config(config: Any) -> Optional[Dict[str, Any]]:
|
||||
if not isinstance(config, dict) or not config:
|
||||
return None
|
||||
budget = config.get("thinkingBudget", config.get("thinking_budget"))
|
||||
include = config.get("includeThoughts", config.get("include_thoughts"))
|
||||
level = config.get("thinkingLevel", config.get("thinking_level"))
|
||||
normalized: Dict[str, Any] = {}
|
||||
if isinstance(budget, (int, float)):
|
||||
normalized["thinkingBudget"] = int(budget)
|
||||
if isinstance(include, bool):
|
||||
normalized["includeThoughts"] = include
|
||||
if isinstance(level, str) and level.strip():
|
||||
normalized["thinkingLevel"] = level.strip().lower()
|
||||
return normalized or None
|
||||
|
||||
|
||||
def build_gemini_request(
|
||||
*,
|
||||
messages: List[Dict[str, Any]],
|
||||
tools: Any = None,
|
||||
tool_choice: Any = None,
|
||||
temperature: Optional[float] = None,
|
||||
max_tokens: Optional[int] = None,
|
||||
top_p: Optional[float] = None,
|
||||
stop: Any = None,
|
||||
thinking_config: Any = None,
|
||||
) -> Dict[str, Any]:
|
||||
contents, system_instruction = _build_gemini_contents(messages)
|
||||
request: Dict[str, Any] = {"contents": contents}
|
||||
if system_instruction:
|
||||
request["systemInstruction"] = system_instruction
|
||||
|
||||
gemini_tools = _translate_tools_to_gemini(tools)
|
||||
if gemini_tools:
|
||||
request["tools"] = gemini_tools
|
||||
|
||||
tool_config = _translate_tool_choice_to_gemini(tool_choice)
|
||||
if tool_config:
|
||||
request["toolConfig"] = tool_config
|
||||
|
||||
generation_config: Dict[str, Any] = {}
|
||||
if temperature is not None:
|
||||
generation_config["temperature"] = temperature
|
||||
if max_tokens is not None:
|
||||
generation_config["maxOutputTokens"] = max_tokens
|
||||
if top_p is not None:
|
||||
generation_config["topP"] = top_p
|
||||
if stop:
|
||||
generation_config["stopSequences"] = stop if isinstance(stop, list) else [str(stop)]
|
||||
normalized_thinking = _normalize_thinking_config(thinking_config)
|
||||
if normalized_thinking:
|
||||
generation_config["thinkingConfig"] = normalized_thinking
|
||||
if generation_config:
|
||||
request["generationConfig"] = generation_config
|
||||
|
||||
return request
|
||||
|
||||
|
||||
def _map_gemini_finish_reason(reason: str) -> str:
|
||||
mapping = {
|
||||
"STOP": "stop",
|
||||
"MAX_TOKENS": "length",
|
||||
"SAFETY": "content_filter",
|
||||
"RECITATION": "content_filter",
|
||||
"OTHER": "stop",
|
||||
}
|
||||
return mapping.get(str(reason or "").upper(), "stop")
|
||||
|
||||
|
||||
def _tool_call_extra_from_part(part: Dict[str, Any]) -> Optional[Dict[str, Any]]:
|
||||
sig = part.get("thoughtSignature")
|
||||
if isinstance(sig, str) and sig:
|
||||
return {"google": {"thought_signature": sig}}
|
||||
return None
|
||||
|
||||
|
||||
def _empty_response(model: str) -> SimpleNamespace:
|
||||
message = SimpleNamespace(
|
||||
role="assistant",
|
||||
content="",
|
||||
tool_calls=None,
|
||||
reasoning=None,
|
||||
reasoning_content=None,
|
||||
reasoning_details=None,
|
||||
)
|
||||
choice = SimpleNamespace(index=0, message=message, finish_reason="stop")
|
||||
usage = SimpleNamespace(
|
||||
prompt_tokens=0,
|
||||
completion_tokens=0,
|
||||
total_tokens=0,
|
||||
prompt_tokens_details=SimpleNamespace(cached_tokens=0),
|
||||
)
|
||||
return SimpleNamespace(
|
||||
id=f"chatcmpl-{uuid.uuid4().hex[:12]}",
|
||||
object="chat.completion",
|
||||
created=int(time.time()),
|
||||
model=model,
|
||||
choices=[choice],
|
||||
usage=usage,
|
||||
)
|
||||
|
||||
|
||||
def translate_gemini_response(resp: Dict[str, Any], model: str) -> SimpleNamespace:
|
||||
candidates = resp.get("candidates") or []
|
||||
if not isinstance(candidates, list) or not candidates:
|
||||
return _empty_response(model)
|
||||
|
||||
cand = candidates[0] if isinstance(candidates[0], dict) else {}
|
||||
content_obj = cand.get("content") if isinstance(cand, dict) else {}
|
||||
parts = content_obj.get("parts") if isinstance(content_obj, dict) else []
|
||||
|
||||
text_pieces: List[str] = []
|
||||
reasoning_pieces: List[str] = []
|
||||
tool_calls: List[SimpleNamespace] = []
|
||||
|
||||
for index, part in enumerate(parts or []):
|
||||
if not isinstance(part, dict):
|
||||
continue
|
||||
if part.get("thought") is True and isinstance(part.get("text"), str):
|
||||
reasoning_pieces.append(part["text"])
|
||||
continue
|
||||
if isinstance(part.get("text"), str):
|
||||
text_pieces.append(part["text"])
|
||||
continue
|
||||
fc = part.get("functionCall")
|
||||
if isinstance(fc, dict) and fc.get("name"):
|
||||
try:
|
||||
args_str = json.dumps(fc.get("args") or {}, ensure_ascii=False)
|
||||
except (TypeError, ValueError):
|
||||
args_str = "{}"
|
||||
tool_call = SimpleNamespace(
|
||||
id=f"call_{uuid.uuid4().hex[:12]}",
|
||||
type="function",
|
||||
index=index,
|
||||
function=SimpleNamespace(name=str(fc["name"]), arguments=args_str),
|
||||
)
|
||||
extra_content = _tool_call_extra_from_part(part)
|
||||
if extra_content:
|
||||
tool_call.extra_content = extra_content
|
||||
tool_calls.append(tool_call)
|
||||
|
||||
finish_reason = "tool_calls" if tool_calls else _map_gemini_finish_reason(str(cand.get("finishReason") or ""))
|
||||
usage_meta = resp.get("usageMetadata") or {}
|
||||
usage = SimpleNamespace(
|
||||
prompt_tokens=int(usage_meta.get("promptTokenCount") or 0),
|
||||
completion_tokens=int(usage_meta.get("candidatesTokenCount") or 0),
|
||||
total_tokens=int(usage_meta.get("totalTokenCount") or 0),
|
||||
prompt_tokens_details=SimpleNamespace(
|
||||
cached_tokens=int(usage_meta.get("cachedContentTokenCount") or 0),
|
||||
),
|
||||
)
|
||||
reasoning = "".join(reasoning_pieces) or None
|
||||
message = SimpleNamespace(
|
||||
role="assistant",
|
||||
content="".join(text_pieces) if text_pieces else None,
|
||||
tool_calls=tool_calls or None,
|
||||
reasoning=reasoning,
|
||||
reasoning_content=reasoning,
|
||||
reasoning_details=None,
|
||||
)
|
||||
choice = SimpleNamespace(index=0, message=message, finish_reason=finish_reason)
|
||||
return SimpleNamespace(
|
||||
id=f"chatcmpl-{uuid.uuid4().hex[:12]}",
|
||||
object="chat.completion",
|
||||
created=int(time.time()),
|
||||
model=model,
|
||||
choices=[choice],
|
||||
usage=usage,
|
||||
)
|
||||
|
||||
|
||||
class _GeminiStreamChunk(SimpleNamespace):
|
||||
pass
|
||||
|
||||
|
||||
def _make_stream_chunk(
|
||||
*,
|
||||
model: str,
|
||||
content: str = "",
|
||||
tool_call_delta: Optional[Dict[str, Any]] = None,
|
||||
finish_reason: Optional[str] = None,
|
||||
reasoning: str = "",
|
||||
) -> _GeminiStreamChunk:
|
||||
delta_kwargs: Dict[str, Any] = {
|
||||
"role": "assistant",
|
||||
"content": None,
|
||||
"tool_calls": None,
|
||||
"reasoning": None,
|
||||
"reasoning_content": None,
|
||||
}
|
||||
if content:
|
||||
delta_kwargs["content"] = content
|
||||
if tool_call_delta is not None:
|
||||
tool_delta = SimpleNamespace(
|
||||
index=tool_call_delta.get("index", 0),
|
||||
id=tool_call_delta.get("id") or f"call_{uuid.uuid4().hex[:12]}",
|
||||
type="function",
|
||||
function=SimpleNamespace(
|
||||
name=tool_call_delta.get("name") or "",
|
||||
arguments=tool_call_delta.get("arguments") or "",
|
||||
),
|
||||
)
|
||||
extra_content = tool_call_delta.get("extra_content")
|
||||
if isinstance(extra_content, dict):
|
||||
tool_delta.extra_content = extra_content
|
||||
delta_kwargs["tool_calls"] = [tool_delta]
|
||||
if reasoning:
|
||||
delta_kwargs["reasoning"] = reasoning
|
||||
delta_kwargs["reasoning_content"] = reasoning
|
||||
delta = SimpleNamespace(**delta_kwargs)
|
||||
choice = SimpleNamespace(index=0, delta=delta, finish_reason=finish_reason)
|
||||
return _GeminiStreamChunk(
|
||||
id=f"chatcmpl-{uuid.uuid4().hex[:12]}",
|
||||
object="chat.completion.chunk",
|
||||
created=int(time.time()),
|
||||
model=model,
|
||||
choices=[choice],
|
||||
usage=None,
|
||||
)
|
||||
|
||||
|
||||
def _iter_sse_events(response: httpx.Response) -> Iterator[Dict[str, Any]]:
|
||||
buffer = ""
|
||||
for chunk in response.iter_text():
|
||||
if not chunk:
|
||||
continue
|
||||
buffer += chunk
|
||||
while "\n" in buffer:
|
||||
line, buffer = buffer.split("\n", 1)
|
||||
line = line.rstrip("\r")
|
||||
if not line:
|
||||
continue
|
||||
if not line.startswith("data: "):
|
||||
continue
|
||||
data = line[6:]
|
||||
if data == "[DONE]":
|
||||
return
|
||||
try:
|
||||
payload = json.loads(data)
|
||||
except json.JSONDecodeError:
|
||||
logger.debug("Non-JSON Gemini SSE line: %s", data[:200])
|
||||
continue
|
||||
if isinstance(payload, dict):
|
||||
yield payload
|
||||
|
||||
|
||||
def translate_stream_event(event: Dict[str, Any], model: str, tool_call_indices: Dict[str, Dict[str, Any]]) -> List[_GeminiStreamChunk]:
|
||||
candidates = event.get("candidates") or []
|
||||
if not candidates:
|
||||
return []
|
||||
cand = candidates[0] if isinstance(candidates[0], dict) else {}
|
||||
parts = ((cand.get("content") or {}).get("parts") or []) if isinstance(cand, dict) else []
|
||||
chunks: List[_GeminiStreamChunk] = []
|
||||
|
||||
for part_index, part in enumerate(parts):
|
||||
if not isinstance(part, dict):
|
||||
continue
|
||||
if part.get("thought") is True and isinstance(part.get("text"), str):
|
||||
chunks.append(_make_stream_chunk(model=model, reasoning=part["text"]))
|
||||
continue
|
||||
if isinstance(part.get("text"), str) and part["text"]:
|
||||
chunks.append(_make_stream_chunk(model=model, content=part["text"]))
|
||||
fc = part.get("functionCall")
|
||||
if isinstance(fc, dict) and fc.get("name"):
|
||||
name = str(fc["name"])
|
||||
try:
|
||||
args_str = json.dumps(fc.get("args") or {}, ensure_ascii=False, sort_keys=True)
|
||||
except (TypeError, ValueError):
|
||||
args_str = "{}"
|
||||
thought_signature = part.get("thoughtSignature") if isinstance(part.get("thoughtSignature"), str) else ""
|
||||
call_key = json.dumps(
|
||||
{
|
||||
"part_index": part_index,
|
||||
"name": name,
|
||||
"thought_signature": thought_signature,
|
||||
},
|
||||
sort_keys=True,
|
||||
)
|
||||
slot = tool_call_indices.get(call_key)
|
||||
if slot is None:
|
||||
slot = {
|
||||
"index": len(tool_call_indices),
|
||||
"id": f"call_{uuid.uuid4().hex[:12]}",
|
||||
"last_arguments": "",
|
||||
}
|
||||
tool_call_indices[call_key] = slot
|
||||
emitted_arguments = args_str
|
||||
last_arguments = str(slot.get("last_arguments") or "")
|
||||
if last_arguments:
|
||||
if args_str == last_arguments:
|
||||
emitted_arguments = ""
|
||||
elif args_str.startswith(last_arguments):
|
||||
emitted_arguments = args_str[len(last_arguments):]
|
||||
slot["last_arguments"] = args_str
|
||||
chunks.append(
|
||||
_make_stream_chunk(
|
||||
model=model,
|
||||
tool_call_delta={
|
||||
"index": slot["index"],
|
||||
"id": slot["id"],
|
||||
"name": name,
|
||||
"arguments": emitted_arguments,
|
||||
"extra_content": _tool_call_extra_from_part(part),
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
finish_reason_raw = str(cand.get("finishReason") or "")
|
||||
if finish_reason_raw:
|
||||
mapped = "tool_calls" if tool_call_indices else _map_gemini_finish_reason(finish_reason_raw)
|
||||
finish_chunk = _make_stream_chunk(model=model, finish_reason=mapped)
|
||||
# Attach usage from this event's usageMetadata so the streaming
|
||||
# loop in run_agent.py can record token counts (mirrors the
|
||||
# non-streaming path in translate_gemini_response).
|
||||
usage_meta = event.get("usageMetadata") or {}
|
||||
if usage_meta:
|
||||
finish_chunk.usage = SimpleNamespace(
|
||||
prompt_tokens=int(usage_meta.get("promptTokenCount") or 0),
|
||||
completion_tokens=int(usage_meta.get("candidatesTokenCount") or 0),
|
||||
total_tokens=int(usage_meta.get("totalTokenCount") or 0),
|
||||
prompt_tokens_details=SimpleNamespace(
|
||||
cached_tokens=int(usage_meta.get("cachedContentTokenCount") or 0),
|
||||
),
|
||||
)
|
||||
chunks.append(finish_chunk)
|
||||
return chunks
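# Incremental-arguments sketch (hypothetical values): some streams repeat the
# accumulated functionCall args on every event, so the slot bookkeeping above
# diffs against what was already emitted for that call:
#   event 1: args_str = '{"path": "README.md"}' -> emitted_arguments = '{"path": "README.md"}'
#   event 2: args_str = '{"path": "README.md"}' -> emitted_arguments = "" (exact repeat)
# If a later serialization strictly extends the previous one, only the new
# suffix is emitted, so clients that accumulate arguments by concatenation
# still reconstruct the full JSON exactly once.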
|
||||
|
||||
|
||||
def gemini_http_error(response: httpx.Response) -> GeminiAPIError:
|
||||
status = response.status_code
|
||||
body_text = ""
|
||||
body_json: Dict[str, Any] = {}
|
||||
try:
|
||||
body_text = response.text
|
||||
except Exception:
|
||||
body_text = ""
|
||||
if body_text:
|
||||
try:
|
||||
parsed = json.loads(body_text)
|
||||
if isinstance(parsed, dict):
|
||||
body_json = parsed
|
||||
except (ValueError, TypeError):
|
||||
body_json = {}
|
||||
|
||||
err_obj = body_json.get("error") if isinstance(body_json, dict) else None
|
||||
if not isinstance(err_obj, dict):
|
||||
err_obj = {}
|
||||
err_status = str(err_obj.get("status") or "").strip()
|
||||
err_message = str(err_obj.get("message") or "").strip()
|
||||
_raw_details = err_obj.get("details")
|
||||
details_list = _raw_details if isinstance(_raw_details, list) else []
|
||||
|
||||
reason = ""
|
||||
retry_after: Optional[float] = None
|
||||
metadata: Dict[str, Any] = {}
|
||||
for detail in details_list:
|
||||
if not isinstance(detail, dict):
|
||||
continue
|
||||
type_url = str(detail.get("@type") or "")
|
||||
if not reason and type_url.endswith("/google.rpc.ErrorInfo"):
|
||||
reason_value = detail.get("reason")
|
||||
if isinstance(reason_value, str):
|
||||
reason = reason_value
|
||||
md = detail.get("metadata")
|
||||
if isinstance(md, dict):
|
||||
metadata = md
|
||||
header_retry = response.headers.get("Retry-After") or response.headers.get("retry-after")
|
||||
if header_retry:
|
||||
try:
|
||||
retry_after = float(header_retry)
|
||||
except (TypeError, ValueError):
|
||||
retry_after = None
|
||||
|
||||
code = f"gemini_http_{status}"
|
||||
if status == 401:
|
||||
code = "gemini_unauthorized"
|
||||
elif status == 429:
|
||||
code = "gemini_rate_limited"
|
||||
elif status == 404:
|
||||
code = "gemini_model_not_found"
|
||||
|
||||
if err_message:
|
||||
message = f"Gemini HTTP {status} ({err_status or 'error'}): {err_message}"
|
||||
else:
|
||||
message = f"Gemini returned HTTP {status}: {body_text[:500]}"
|
||||
|
||||
# Free-tier quota exhaustion -> append actionable guidance so users who
|
||||
# bypassed the setup wizard (direct GOOGLE_API_KEY in .env) still learn
|
||||
# that the free tier cannot sustain an agent session.
|
||||
if status == 429 and is_free_tier_quota_error(err_message or body_text):
|
||||
message = message + _FREE_TIER_GUIDANCE
|
||||
|
||||
return GeminiAPIError(
|
||||
message,
|
||||
code=code,
|
||||
status_code=status,
|
||||
response=response,
|
||||
retry_after=retry_after,
|
||||
details={
|
||||
"status": err_status,
|
||||
"reason": reason,
|
||||
"metadata": metadata,
|
||||
"message": err_message,
|
||||
},
|
||||
)
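# Illustrative sketch (not part of the original module): how the parser above
# behaves on a typical Google error envelope. The payload is an assumed example
# shaped like the fields the parser reads (error.status, error.message,
# error.details[] carrying google.rpc.ErrorInfo), not a captured real response.
def _example_gemini_http_error() -> GeminiAPIError:
    fake = httpx.Response(
        429,
        json={
            "error": {
                "status": "RESOURCE_EXHAUSTED",
                "message": "Quota exceeded for quota metric ...",
                "details": [
                    {
                        "@type": "type.googleapis.com/google.rpc.ErrorInfo",
                        "reason": "RATE_LIMIT_EXCEEDED",
                        "metadata": {"service": "generativelanguage.googleapis.com"},
                    }
                ],
            }
        },
        headers={"Retry-After": "7"},
    )
    err = gemini_http_error(fake)
    # err.code == "gemini_rate_limited", err.retry_after == 7.0,
    # err.details["reason"] == "RATE_LIMIT_EXCEEDED"
    return err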
|
||||
|
||||
|
||||
class _GeminiChatCompletions:
|
||||
def __init__(self, client: "GeminiNativeClient"):
|
||||
self._client = client
|
||||
|
||||
def create(self, **kwargs: Any) -> Any:
|
||||
return self._client._create_chat_completion(**kwargs)
|
||||
|
||||
|
||||
class _AsyncGeminiChatCompletions:
|
||||
def __init__(self, client: "AsyncGeminiNativeClient"):
|
||||
self._client = client
|
||||
|
||||
async def create(self, **kwargs: Any) -> Any:
|
||||
return await self._client._create_chat_completion(**kwargs)
|
||||
|
||||
|
||||
class _GeminiChatNamespace:
|
||||
def __init__(self, client: "GeminiNativeClient"):
|
||||
self.completions = _GeminiChatCompletions(client)
|
||||
|
||||
|
||||
class _AsyncGeminiChatNamespace:
|
||||
def __init__(self, client: "AsyncGeminiNativeClient"):
|
||||
self.completions = _AsyncGeminiChatCompletions(client)
|
||||
|
||||
|
||||
class GeminiNativeClient:
|
||||
"""Minimal OpenAI-SDK-compatible facade over Gemini's native REST API."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
api_key: str,
|
||||
base_url: Optional[str] = None,
|
||||
default_headers: Optional[Dict[str, str]] = None,
|
||||
timeout: Any = None,
|
||||
http_client: Optional[httpx.Client] = None,
|
||||
**_: Any,
|
||||
) -> None:
|
||||
if not (api_key or "").strip():
|
||||
raise RuntimeError(
|
||||
"Gemini native client requires an API key, but none was provided. "
|
||||
"Set GOOGLE_API_KEY or GEMINI_API_KEY in your environment / ~/.hermes/.env "
|
||||
"(get one at https://aistudio.google.com/app/apikey), or run `hermes setup` "
|
||||
"to configure the Google provider."
|
||||
)
|
||||
self.api_key = api_key
|
||||
normalized_base = (base_url or DEFAULT_GEMINI_BASE_URL).rstrip("/")
|
||||
if normalized_base.endswith("/openai"):
|
||||
normalized_base = normalized_base[: -len("/openai")]
|
||||
self.base_url = normalized_base
|
||||
self._default_headers = dict(default_headers or {})
|
||||
self.chat = _GeminiChatNamespace(self)
|
||||
self.is_closed = False
|
||||
self._http = http_client or httpx.Client(
|
||||
timeout=timeout or httpx.Timeout(connect=15.0, read=600.0, write=30.0, pool=30.0)
|
||||
)
|
||||
|
||||
def close(self) -> None:
|
||||
self.is_closed = True
|
||||
try:
|
||||
self._http.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
self.close()
|
||||
|
||||
def _headers(self) -> Dict[str, str]:
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json",
|
||||
"x-goog-api-key": self.api_key,
|
||||
"User-Agent": "hermes-agent (gemini-native)",
|
||||
}
|
||||
headers.update(self._default_headers)
|
||||
return headers
|
||||
|
||||
@staticmethod
|
||||
def _advance_stream_iterator(iterator: Iterator[_GeminiStreamChunk]) -> tuple[bool, Optional[_GeminiStreamChunk]]:
|
||||
try:
|
||||
return False, next(iterator)
|
||||
except StopIteration:
|
||||
return True, None
|
||||
|
||||
def _create_chat_completion(
|
||||
self,
|
||||
*,
|
||||
model: str = "gemini-2.5-flash",
|
||||
messages: Optional[List[Dict[str, Any]]] = None,
|
||||
stream: bool = False,
|
||||
tools: Any = None,
|
||||
tool_choice: Any = None,
|
||||
temperature: Optional[float] = None,
|
||||
max_tokens: Optional[int] = None,
|
||||
top_p: Optional[float] = None,
|
||||
stop: Any = None,
|
||||
extra_body: Optional[Dict[str, Any]] = None,
|
||||
timeout: Any = None,
|
||||
**_: Any,
|
||||
) -> Any:
|
||||
thinking_config = None
|
||||
if isinstance(extra_body, dict):
|
||||
thinking_config = extra_body.get("thinking_config") or extra_body.get("thinkingConfig")
|
||||
|
||||
request = build_gemini_request(
|
||||
messages=messages or [],
|
||||
tools=tools,
|
||||
tool_choice=tool_choice,
|
||||
temperature=temperature,
|
||||
max_tokens=max_tokens,
|
||||
top_p=top_p,
|
||||
stop=stop,
|
||||
thinking_config=thinking_config,
|
||||
)
|
||||
|
||||
if stream:
|
||||
return self._stream_completion(model=model, request=request, timeout=timeout)
|
||||
|
||||
url = f"{self.base_url}/models/{model}:generateContent"
|
||||
response = self._http.post(url, json=request, headers=self._headers(), timeout=timeout)
|
||||
if response.status_code != 200:
|
||||
raise gemini_http_error(response)
|
||||
try:
|
||||
payload = response.json()
|
||||
except ValueError as exc:
|
||||
raise GeminiAPIError(
|
||||
f"Invalid JSON from Gemini native API: {exc}",
|
||||
code="gemini_invalid_json",
|
||||
status_code=response.status_code,
|
||||
response=response,
|
||||
) from exc
|
||||
return translate_gemini_response(payload, model=model)
|
||||
|
||||
def _stream_completion(self, *, model: str, request: Dict[str, Any], timeout: Any = None) -> Iterator[_GeminiStreamChunk]:
|
||||
url = f"{self.base_url}/models/{model}:streamGenerateContent?alt=sse"
|
||||
stream_headers = dict(self._headers())
|
||||
stream_headers["Accept"] = "text/event-stream"
|
||||
|
||||
def _generator() -> Iterator[_GeminiStreamChunk]:
|
||||
try:
|
||||
with self._http.stream("POST", url, json=request, headers=stream_headers, timeout=timeout) as response:
|
||||
if response.status_code != 200:
|
||||
response.read()
|
||||
raise gemini_http_error(response)
|
||||
tool_call_indices: Dict[str, Dict[str, Any]] = {}
|
||||
for event in _iter_sse_events(response):
|
||||
for chunk in translate_stream_event(event, model, tool_call_indices):
|
||||
yield chunk
|
||||
except httpx.HTTPError as exc:
|
||||
raise GeminiAPIError(
|
||||
f"Gemini streaming request failed: {exc}",
|
||||
code="gemini_stream_error",
|
||||
) from exc
|
||||
|
||||
return _generator()
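# Illustrative sketch (not part of the original module): driving the facade
# above the way an OpenAI SDK client is driven. Model id and prompts are
# placeholders, the attribute access assumes the OpenAI-style response shape
# this facade mirrors, and a valid GOOGLE_API_KEY is required for the calls
# to succeed.
def _example_native_client_usage() -> None:
    import os

    with GeminiNativeClient(api_key=os.environ["GOOGLE_API_KEY"]) as client:
        # Non-streaming request.
        resp = client.chat.completions.create(
            model="gemini-2.5-flash",
            messages=[{"role": "user", "content": "Say hello in one word."}],
        )
        print(resp.choices[0].message.content)

        # Streaming request: iterate OpenAI-style chunks as they arrive.
        for chunk in client.chat.completions.create(
            model="gemini-2.5-flash",
            messages=[{"role": "user", "content": "Count to three."}],
            stream=True,
        ):
            pass  # inspect chunk.choices[0].delta here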
|
||||
|
||||
|
||||
class AsyncGeminiNativeClient:
|
||||
"""Async wrapper used by auxiliary_client for native Gemini calls."""
|
||||
|
||||
def __init__(self, sync_client: GeminiNativeClient):
|
||||
self._sync = sync_client
|
||||
self.api_key = sync_client.api_key
|
||||
self.base_url = sync_client.base_url
|
||||
self.chat = _AsyncGeminiChatNamespace(self)
|
||||
|
||||
async def _create_chat_completion(self, **kwargs: Any) -> Any:
|
||||
stream = bool(kwargs.get("stream"))
|
||||
result = await asyncio.to_thread(self._sync.chat.completions.create, **kwargs)
|
||||
if not stream:
|
||||
return result
|
||||
|
||||
async def _async_stream() -> Any:
|
||||
while True:
|
||||
done, chunk = await asyncio.to_thread(self._sync._advance_stream_iterator, result)
|
||||
if done:
|
||||
break
|
||||
yield chunk
|
||||
|
||||
return _async_stream()
|
||||
|
||||
async def close(self) -> None:
|
||||
await asyncio.to_thread(self._sync.close)
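# Illustrative sketch (not part of the original module): the async wrapper is
# driven the same way, with the blocking work pushed to a thread. The model id
# and prompt are placeholders.
async def _example_async_usage(sync_client: GeminiNativeClient) -> None:
    aclient = AsyncGeminiNativeClient(sync_client)
    stream = await aclient.chat.completions.create(
        model="gemini-2.5-flash",
        messages=[{"role": "user", "content": "ping"}],
        stream=True,
    )
    async for chunk in stream:
        pass  # each chunk mirrors the sync streaming chunks
    await aclient.close()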
|
||||
@@ -1,99 +0,0 @@
|
||||
"""Helpers for translating OpenAI-style tool schemas to Gemini's schema subset."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict
|
||||
|
||||
# Gemini's ``FunctionDeclaration.parameters`` field accepts the ``Schema``
|
||||
# object, which is only a subset of OpenAPI 3.0 / JSON Schema. Strip fields
|
||||
# outside that subset before sending Hermes tool schemas to Google.
|
||||
_GEMINI_SCHEMA_ALLOWED_KEYS = {
|
||||
"type",
|
||||
"format",
|
||||
"title",
|
||||
"description",
|
||||
"nullable",
|
||||
"enum",
|
||||
"maxItems",
|
||||
"minItems",
|
||||
"properties",
|
||||
"required",
|
||||
"minProperties",
|
||||
"maxProperties",
|
||||
"minLength",
|
||||
"maxLength",
|
||||
"pattern",
|
||||
"example",
|
||||
"anyOf",
|
||||
"propertyOrdering",
|
||||
"default",
|
||||
"items",
|
||||
"minimum",
|
||||
"maximum",
|
||||
}
|
||||
|
||||
|
||||
def sanitize_gemini_schema(schema: Any) -> Dict[str, Any]:
|
||||
"""Return a Gemini-compatible copy of a tool parameter schema.
|
||||
|
||||
Hermes tool schemas are OpenAI-flavored JSON Schema and may contain keys
|
||||
such as ``$schema`` or ``additionalProperties`` that Google's Gemini
|
||||
``Schema`` object rejects. This helper preserves the documented Gemini
|
||||
subset and recursively sanitizes nested ``properties`` / ``items`` /
|
||||
``anyOf`` definitions.
|
||||
"""
|
||||
|
||||
if not isinstance(schema, dict):
|
||||
return {}
|
||||
|
||||
cleaned: Dict[str, Any] = {}
|
||||
for key, value in schema.items():
|
||||
if key not in _GEMINI_SCHEMA_ALLOWED_KEYS:
|
||||
continue
|
||||
if key == "properties":
|
||||
if not isinstance(value, dict):
|
||||
continue
|
||||
props: Dict[str, Any] = {}
|
||||
for prop_name, prop_schema in value.items():
|
||||
if not isinstance(prop_name, str):
|
||||
continue
|
||||
props[prop_name] = sanitize_gemini_schema(prop_schema)
|
||||
cleaned[key] = props
|
||||
continue
|
||||
if key == "items":
|
||||
cleaned[key] = sanitize_gemini_schema(value)
|
||||
continue
|
||||
if key == "anyOf":
|
||||
if not isinstance(value, list):
|
||||
continue
|
||||
cleaned[key] = [
|
||||
sanitize_gemini_schema(item)
|
||||
for item in value
|
||||
if isinstance(item, dict)
|
||||
]
|
||||
continue
|
||||
cleaned[key] = value
|
||||
|
||||
# Gemini's Schema validator requires every ``enum`` entry to be a string,
|
||||
# even when the parent ``type`` is ``integer`` / ``number`` / ``boolean``.
|
||||
# OpenAI / OpenRouter / Anthropic accept typed enums (e.g. Discord's
|
||||
# ``auto_archive_duration: {type: integer, enum: [60, 1440, 4320, 10080]}``),
|
||||
# so we only drop the ``enum`` when it would collide with Gemini's rule.
|
||||
# Keeping ``type: integer`` plus the human-readable description gives the
|
||||
# model enough guidance; the tool handler still validates the value.
|
||||
enum_val = cleaned.get("enum")
|
||||
type_val = cleaned.get("type")
|
||||
if isinstance(enum_val, list) and type_val in {"integer", "number", "boolean"}:
|
||||
if any(not isinstance(item, str) for item in enum_val):
|
||||
cleaned.pop("enum", None)
|
||||
|
||||
return cleaned
|
||||
|
||||
|
||||
def sanitize_gemini_tool_parameters(parameters: Any) -> Dict[str, Any]:
|
||||
"""Normalize tool parameters to a valid Gemini object schema."""
|
||||
|
||||
cleaned = sanitize_gemini_schema(parameters)
|
||||
if not cleaned:
|
||||
return {"type": "object", "properties": {}}
|
||||
return cleaned
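# Illustrative sketch (not part of the original module): a made-up
# OpenAI-flavored tool schema run through the sanitizers above. Keys outside
# Gemini's Schema subset ("$schema", "additionalProperties") are dropped, and
# the non-string integer enum is removed while its type and description stay.
_EXAMPLE_INPUT = {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "type": "object",
    "additionalProperties": False,
    "properties": {
        "auto_archive_duration": {
            "type": "integer",
            "description": "Minutes before auto-archive.",
            "enum": [60, 1440, 4320, 10080],
        },
    },
    "required": ["auto_archive_duration"],
}
# sanitize_gemini_tool_parameters(_EXAMPLE_INPUT) ->
# {
#     "type": "object",
#     "properties": {
#         "auto_archive_duration": {
#             "type": "integer",
#             "description": "Minutes before auto-archive.",
#         },
#     },
#     "required": ["auto_archive_duration"],
# }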
|
||||
@@ -1,452 +0,0 @@
|
||||
"""Google Code Assist API client — project discovery, onboarding, quota.
|
||||
|
||||
The Code Assist API powers Google's official gemini-cli. It sits at
|
||||
``cloudcode-pa.googleapis.com`` and provides:
|
||||
|
||||
- Free tier access (generous daily quota) for personal Google accounts
|
||||
- Paid tier access via GCP projects with billing / Workspace / Standard / Enterprise
|
||||
|
||||
This module handles the control-plane dance needed before inference:
|
||||
|
||||
1. ``load_code_assist()`` — probe the user's account to learn what tier they're on
|
||||
and whether a ``cloudaicompanionProject`` is already assigned.
|
||||
2. ``onboard_user()`` — if the user hasn't been onboarded yet (new account, fresh
|
||||
free tier, etc.), call this with the chosen tier + project id. Supports LRO
|
||||
polling for slow provisioning.
|
||||
3. ``retrieve_user_quota()`` — fetch the ``buckets[]`` array showing remaining
|
||||
quota per model, used by the ``/gquota`` slash command.
|
||||
|
||||
VPC-SC handling: enterprise accounts under a VPC Service Controls perimeter
|
||||
will get ``SECURITY_POLICY_VIOLATED`` on ``load_code_assist``. We catch this
|
||||
and force the account to ``standard-tier`` so the call chain still succeeds.
|
||||
|
||||
Derived from opencode-gemini-auth (MIT) and clawdbot/extensions/google. The
|
||||
request/response shapes are specific to Google's internal Code Assist API,
|
||||
documented nowhere public — we copy them from the reference implementations.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
import urllib.error
|
||||
import urllib.parse
|
||||
import urllib.request
|
||||
import uuid
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Constants
|
||||
# =============================================================================
|
||||
|
||||
CODE_ASSIST_ENDPOINT = "https://cloudcode-pa.googleapis.com"
|
||||
|
||||
# Fallback endpoints tried when prod returns an error during project discovery
|
||||
FALLBACK_ENDPOINTS = [
|
||||
"https://daily-cloudcode-pa.sandbox.googleapis.com",
|
||||
"https://autopush-cloudcode-pa.sandbox.googleapis.com",
|
||||
]
|
||||
|
||||
# Tier identifiers that Google's API uses
|
||||
FREE_TIER_ID = "free-tier"
|
||||
LEGACY_TIER_ID = "legacy-tier"
|
||||
STANDARD_TIER_ID = "standard-tier"
|
||||
|
||||
# Default HTTP headers matching gemini-cli's fingerprint.
|
||||
# Google may reject unrecognized User-Agents on these internal endpoints.
|
||||
_GEMINI_CLI_USER_AGENT = "google-api-nodejs-client/9.15.1 (gzip)"
|
||||
_X_GOOG_API_CLIENT = "gl-node/24.0.0"
|
||||
_DEFAULT_REQUEST_TIMEOUT = 30.0
|
||||
_ONBOARDING_POLL_ATTEMPTS = 12
|
||||
_ONBOARDING_POLL_INTERVAL_SECONDS = 5.0
|
||||
|
||||
|
||||
class CodeAssistError(RuntimeError):
|
||||
"""Exception raised by the Code Assist (``cloudcode-pa``) integration.
|
||||
|
||||
Carries HTTP status / response / retry-after metadata so the agent's
|
||||
``error_classifier._extract_status_code`` and the main loop's Retry-After
|
||||
handling (which walks ``error.response.headers``) pick up the right
|
||||
signals. Without these, 429s from the OAuth path look like opaque
|
||||
``RuntimeError`` and skip the rate-limit path.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
message: str,
|
||||
*,
|
||||
code: str = "code_assist_error",
|
||||
status_code: Optional[int] = None,
|
||||
response: Any = None,
|
||||
retry_after: Optional[float] = None,
|
||||
details: Optional[Dict[str, Any]] = None,
|
||||
) -> None:
|
||||
super().__init__(message)
|
||||
self.code = code
|
||||
# ``status_code`` is picked up by ``agent.error_classifier._extract_status_code``
|
||||
# so a 429 from Code Assist classifies as FailoverReason.rate_limit and
|
||||
# triggers the main loop's fallback_providers chain the same way SDK
|
||||
# errors do.
|
||||
self.status_code = status_code
|
||||
# ``response`` is the underlying ``httpx.Response`` (or a shim with a
|
||||
# ``.headers`` mapping and ``.json()`` method). The main loop reads
|
||||
# ``error.response.headers["Retry-After"]`` to honor Google's retry
|
||||
# hints when the backend throttles us.
|
||||
self.response = response
|
||||
# Parsed ``Retry-After`` seconds (kept separately for convenience —
|
||||
# Google returns retry hints in both the header and the error body's
|
||||
# ``google.rpc.RetryInfo`` details, and we pick whichever we found).
|
||||
self.retry_after = retry_after
|
||||
# Parsed structured error details from the Google error envelope
|
||||
# (e.g. ``{"reason": "MODEL_CAPACITY_EXHAUSTED", "status": "RESOURCE_EXHAUSTED"}``).
|
||||
# Useful for logging and for tests that want to assert on specifics.
|
||||
self.details = details or {}
|
||||
|
||||
|
||||
class ProjectIdRequiredError(CodeAssistError):
|
||||
def __init__(self, message: str = "GCP project id required for this tier") -> None:
|
||||
super().__init__(message, code="code_assist_project_id_required")
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# HTTP primitive (auth via Bearer token passed per-call)
|
||||
# =============================================================================
|
||||
|
||||
def _build_headers(access_token: str, *, user_agent_model: str = "") -> Dict[str, str]:
|
||||
ua = _GEMINI_CLI_USER_AGENT
|
||||
if user_agent_model:
|
||||
ua = f"{ua} model/{user_agent_model}"
|
||||
return {
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json",
|
||||
"Authorization": f"Bearer {access_token}",
|
||||
"User-Agent": ua,
|
||||
"X-Goog-Api-Client": _X_GOOG_API_CLIENT,
|
||||
"x-activity-request-id": str(uuid.uuid4()),
|
||||
}
|
||||
|
||||
|
||||
def _client_metadata() -> Dict[str, str]:
|
||||
"""Match Google's gemini-cli exactly — unrecognized metadata may be rejected."""
|
||||
return {
|
||||
"ideType": "IDE_UNSPECIFIED",
|
||||
"platform": "PLATFORM_UNSPECIFIED",
|
||||
"pluginType": "GEMINI",
|
||||
}
|
||||
|
||||
|
||||
def _post_json(
|
||||
url: str,
|
||||
body: Dict[str, Any],
|
||||
access_token: str,
|
||||
*,
|
||||
timeout: float = _DEFAULT_REQUEST_TIMEOUT,
|
||||
user_agent_model: str = "",
|
||||
) -> Dict[str, Any]:
|
||||
data = json.dumps(body).encode("utf-8")
|
||||
request = urllib.request.Request(
|
||||
url, data=data, method="POST",
|
||||
headers=_build_headers(access_token, user_agent_model=user_agent_model),
|
||||
)
|
||||
try:
|
||||
with urllib.request.urlopen(request, timeout=timeout) as response:
|
||||
raw = response.read().decode("utf-8", errors="replace")
|
||||
return json.loads(raw) if raw else {}
|
||||
except urllib.error.HTTPError as exc:
|
||||
detail = ""
|
||||
try:
|
||||
detail = exc.read().decode("utf-8", errors="replace")
|
||||
except Exception:
|
||||
pass
|
||||
# Special case: VPC-SC violation should be distinguishable
|
||||
if _is_vpc_sc_violation(detail):
|
||||
raise CodeAssistError(
|
||||
f"VPC-SC policy violation: {detail}",
|
||||
code="code_assist_vpc_sc",
|
||||
) from exc
|
||||
raise CodeAssistError(
|
||||
f"Code Assist HTTP {exc.code}: {detail or exc.reason}",
|
||||
code=f"code_assist_http_{exc.code}",
|
||||
) from exc
|
||||
except urllib.error.URLError as exc:
|
||||
raise CodeAssistError(
|
||||
f"Code Assist request failed: {exc}",
|
||||
code="code_assist_network_error",
|
||||
) from exc
|
||||
|
||||
|
||||
def _is_vpc_sc_violation(body: str) -> bool:
|
||||
"""Detect a VPC Service Controls violation from a response body."""
|
||||
if not body:
|
||||
return False
|
||||
try:
|
||||
parsed = json.loads(body)
|
||||
except (json.JSONDecodeError, ValueError):
|
||||
return "SECURITY_POLICY_VIOLATED" in body
|
||||
# Walk the nested error structure Google uses
|
||||
error = parsed.get("error") if isinstance(parsed, dict) else None
|
||||
if not isinstance(error, dict):
|
||||
return False
|
||||
details = error.get("details") or []
|
||||
if isinstance(details, list):
|
||||
for item in details:
|
||||
if isinstance(item, dict):
|
||||
reason = item.get("reason") or ""
|
||||
if reason == "SECURITY_POLICY_VIOLATED":
|
||||
return True
|
||||
msg = str(error.get("message", ""))
|
||||
return "SECURITY_POLICY_VIOLATED" in msg
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# load_code_assist — discovers current tier + assigned project
|
||||
# =============================================================================
|
||||
|
||||
@dataclass
|
||||
class CodeAssistProjectInfo:
|
||||
"""Result from ``load_code_assist``."""
|
||||
current_tier_id: str = ""
|
||||
cloudaicompanion_project: str = "" # Google-managed project (free tier)
|
||||
allowed_tiers: List[str] = field(default_factory=list)
|
||||
raw: Dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
|
||||
def load_code_assist(
|
||||
access_token: str,
|
||||
*,
|
||||
project_id: str = "",
|
||||
user_agent_model: str = "",
|
||||
) -> CodeAssistProjectInfo:
|
||||
"""Call ``POST /v1internal:loadCodeAssist`` with prod → sandbox fallback.
|
||||
|
||||
Returns whatever tier + project info Google reports. On VPC-SC violations,
|
||||
returns a synthetic ``standard-tier`` result so the chain can continue.
|
||||
"""
|
||||
body: Dict[str, Any] = {
|
||||
"metadata": {
|
||||
"duetProject": project_id,
|
||||
**_client_metadata(),
|
||||
},
|
||||
}
|
||||
if project_id:
|
||||
body["cloudaicompanionProject"] = project_id
|
||||
|
||||
endpoints = [CODE_ASSIST_ENDPOINT] + FALLBACK_ENDPOINTS
|
||||
last_err: Optional[Exception] = None
|
||||
for endpoint in endpoints:
|
||||
url = f"{endpoint}/v1internal:loadCodeAssist"
|
||||
try:
|
||||
resp = _post_json(url, body, access_token, user_agent_model=user_agent_model)
|
||||
return _parse_load_response(resp)
|
||||
except CodeAssistError as exc:
|
||||
if exc.code == "code_assist_vpc_sc":
|
||||
logger.info("VPC-SC violation on %s — defaulting to standard-tier", endpoint)
|
||||
return CodeAssistProjectInfo(
|
||||
current_tier_id=STANDARD_TIER_ID,
|
||||
cloudaicompanion_project=project_id,
|
||||
)
|
||||
last_err = exc
|
||||
logger.warning("loadCodeAssist failed on %s: %s", endpoint, exc)
|
||||
continue
|
||||
if last_err:
|
||||
raise last_err
|
||||
return CodeAssistProjectInfo()
|
||||
|
||||
|
||||
def _parse_load_response(resp: Dict[str, Any]) -> CodeAssistProjectInfo:
|
||||
current_tier = resp.get("currentTier") or {}
|
||||
tier_id = str(current_tier.get("id") or "") if isinstance(current_tier, dict) else ""
|
||||
project = str(resp.get("cloudaicompanionProject") or "")
|
||||
allowed = resp.get("allowedTiers") or []
|
||||
allowed_ids: List[str] = []
|
||||
if isinstance(allowed, list):
|
||||
for t in allowed:
|
||||
if isinstance(t, dict):
|
||||
tid = str(t.get("id") or "")
|
||||
if tid:
|
||||
allowed_ids.append(tid)
|
||||
return CodeAssistProjectInfo(
|
||||
current_tier_id=tier_id,
|
||||
cloudaicompanion_project=project,
|
||||
allowed_tiers=allowed_ids,
|
||||
raw=resp,
|
||||
)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# onboard_user — provisions a new user on a tier (with LRO polling)
|
||||
# =============================================================================
|
||||
|
||||
def onboard_user(
|
||||
access_token: str,
|
||||
*,
|
||||
tier_id: str,
|
||||
project_id: str = "",
|
||||
user_agent_model: str = "",
|
||||
) -> Dict[str, Any]:
|
||||
"""Call ``POST /v1internal:onboardUser`` to provision the user.
|
||||
|
||||
For paid tiers, ``project_id`` is REQUIRED (raises ProjectIdRequiredError).
|
||||
For free tiers, ``project_id`` is optional — Google will assign one.
|
||||
|
||||
Returns the final operation response. Polls ``/v1internal/<name>`` for up
|
||||
to ``_ONBOARDING_POLL_ATTEMPTS`` × ``_ONBOARDING_POLL_INTERVAL_SECONDS``
|
||||
(default: 12 × 5s = 1 min).
|
||||
"""
|
||||
if tier_id != FREE_TIER_ID and tier_id != LEGACY_TIER_ID and not project_id:
|
||||
raise ProjectIdRequiredError(
|
||||
f"Tier {tier_id!r} requires a GCP project id. "
|
||||
"Set HERMES_GEMINI_PROJECT_ID or GOOGLE_CLOUD_PROJECT."
|
||||
)
|
||||
|
||||
body: Dict[str, Any] = {
|
||||
"tierId": tier_id,
|
||||
"metadata": _client_metadata(),
|
||||
}
|
||||
if project_id:
|
||||
body["cloudaicompanionProject"] = project_id
|
||||
|
||||
endpoint = CODE_ASSIST_ENDPOINT
|
||||
url = f"{endpoint}/v1internal:onboardUser"
|
||||
resp = _post_json(url, body, access_token, user_agent_model=user_agent_model)
|
||||
|
||||
# Poll if LRO (long-running operation)
|
||||
if not resp.get("done"):
|
||||
op_name = resp.get("name", "")
|
||||
if not op_name:
|
||||
return resp
|
||||
for attempt in range(_ONBOARDING_POLL_ATTEMPTS):
|
||||
time.sleep(_ONBOARDING_POLL_INTERVAL_SECONDS)
|
||||
poll_url = f"{endpoint}/v1internal/{op_name}"
|
||||
try:
|
||||
poll_resp = _post_json(poll_url, {}, access_token, user_agent_model=user_agent_model)
|
||||
except CodeAssistError as exc:
|
||||
logger.warning("Onboarding poll attempt %d failed: %s", attempt + 1, exc)
|
||||
continue
|
||||
if poll_resp.get("done"):
|
||||
return poll_resp
|
||||
logger.warning("Onboarding did not complete within %d attempts", _ONBOARDING_POLL_ATTEMPTS)
|
||||
return resp
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# retrieve_user_quota — for /gquota
|
||||
# =============================================================================
|
||||
|
||||
@dataclass
|
||||
class QuotaBucket:
|
||||
model_id: str
|
||||
token_type: str = ""
|
||||
remaining_fraction: float = 0.0
|
||||
reset_time_iso: str = ""
|
||||
raw: Dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
|
||||
def retrieve_user_quota(
|
||||
access_token: str,
|
||||
*,
|
||||
project_id: str = "",
|
||||
user_agent_model: str = "",
|
||||
) -> List[QuotaBucket]:
|
||||
"""Call ``POST /v1internal:retrieveUserQuota`` and parse ``buckets[]``."""
|
||||
body: Dict[str, Any] = {}
|
||||
if project_id:
|
||||
body["project"] = project_id
|
||||
url = f"{CODE_ASSIST_ENDPOINT}/v1internal:retrieveUserQuota"
|
||||
resp = _post_json(url, body, access_token, user_agent_model=user_agent_model)
|
||||
raw_buckets = resp.get("buckets") or []
|
||||
buckets: List[QuotaBucket] = []
|
||||
if not isinstance(raw_buckets, list):
|
||||
return buckets
|
||||
for b in raw_buckets:
|
||||
if not isinstance(b, dict):
|
||||
continue
|
||||
buckets.append(QuotaBucket(
|
||||
model_id=str(b.get("modelId") or ""),
|
||||
token_type=str(b.get("tokenType") or ""),
|
||||
remaining_fraction=float(b.get("remainingFraction") or 0.0),
|
||||
reset_time_iso=str(b.get("resetTime") or ""),
|
||||
raw=b,
|
||||
))
|
||||
return buckets
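# Illustrative sketch (not part of the original module): the response shape
# parsed above. The bucket values are made up, not real quota data.
#
#   resp = {"buckets": [
#       {"modelId": "gemini-2.5-pro", "tokenType": "REQUESTS",
#        "remainingFraction": 0.42, "resetTime": "2025-01-01T00:00:00Z"},
#   ]}
#
# retrieve_user_quota() would turn that into
#   [QuotaBucket(model_id="gemini-2.5-pro", token_type="REQUESTS",
#                remaining_fraction=0.42,
#                reset_time_iso="2025-01-01T00:00:00Z", raw={...})]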
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Project context resolution
|
||||
# =============================================================================
|
||||
|
||||
@dataclass
|
||||
class ProjectContext:
|
||||
"""Resolved state for a given OAuth session."""
|
||||
project_id: str = "" # effective project id sent on requests
|
||||
managed_project_id: str = "" # Google-assigned project (free tier)
|
||||
tier_id: str = ""
|
||||
source: str = "" # "env", "config", "discovered", "onboarded"
|
||||
|
||||
|
||||
def resolve_project_context(
|
||||
access_token: str,
|
||||
*,
|
||||
configured_project_id: str = "",
|
||||
env_project_id: str = "",
|
||||
user_agent_model: str = "",
|
||||
) -> ProjectContext:
|
||||
"""Figure out what project id + tier to use for requests.
|
||||
|
||||
Priority:
|
||||
1. If configured_project_id or env_project_id is set, use that directly
|
||||
and short-circuit (no discovery needed).
|
||||
2. Otherwise call loadCodeAssist to see what Google says.
|
||||
3. If no tier assigned yet, onboard the user (free tier default).
|
||||
"""
|
||||
# Short-circuit: caller provided a project id
|
||||
if configured_project_id:
|
||||
return ProjectContext(
|
||||
project_id=configured_project_id,
|
||||
tier_id=STANDARD_TIER_ID, # assume paid since they specified one
|
||||
source="config",
|
||||
)
|
||||
if env_project_id:
|
||||
return ProjectContext(
|
||||
project_id=env_project_id,
|
||||
tier_id=STANDARD_TIER_ID,
|
||||
source="env",
|
||||
)
|
||||
|
||||
# Discover via loadCodeAssist
|
||||
info = load_code_assist(access_token, user_agent_model=user_agent_model)
|
||||
|
||||
effective_project = info.cloudaicompanion_project
|
||||
tier = info.current_tier_id
|
||||
|
||||
if not tier:
|
||||
# User hasn't been onboarded — provision them on free tier
|
||||
onboard_resp = onboard_user(
|
||||
access_token,
|
||||
tier_id=FREE_TIER_ID,
|
||||
project_id="",
|
||||
user_agent_model=user_agent_model,
|
||||
)
|
||||
# Re-parse from the onboard response
|
||||
response_body = onboard_resp.get("response") or {}
|
||||
if isinstance(response_body, dict):
|
||||
effective_project = (
|
||||
effective_project
|
||||
or str(response_body.get("cloudaicompanionProject") or "")
|
||||
)
|
||||
tier = FREE_TIER_ID
|
||||
source = "onboarded"
|
||||
else:
|
||||
source = "discovered"
|
||||
|
||||
return ProjectContext(
|
||||
project_id=effective_project,
|
||||
managed_project_id=effective_project if tier == FREE_TIER_ID else "",
|
||||
tier_id=tier,
|
||||
source=source,
|
||||
)
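# Illustrative sketch (not part of the original module): how the pieces above
# chain together for one OAuth session. ``access_token`` is assumed to come
# from the OAuth flow handled elsewhere in Hermes, and the user_agent_model
# value is a placeholder.
def _example_code_assist_flow(access_token: str) -> None:
    ctx = resolve_project_context(access_token, user_agent_model="gemini-2.5-pro")
    # ctx.tier_id is e.g. "free-tier" after auto-onboarding; ctx.source says
    # whether the project id came from config, env, discovery, or onboarding.
    buckets = retrieve_user_quota(
        access_token,
        project_id=ctx.project_id,
        user_agent_model="gemini-2.5-pro",
    )
    for bucket in buckets:
        print(bucket.model_id, bucket.remaining_fraction)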
|
||||
233
agent/i18n.py
233
agent/i18n.py
@@ -1,233 +0,0 @@
|
||||
"""Lightweight internationalization (i18n) for Hermes static user-facing messages.
|
||||
|
||||
Scope (thin slice, by design): only the highest-impact static strings shown
|
||||
to the user by Hermes itself -- approval prompts, a handful of gateway slash
|
||||
command replies, restart-drain notices. Agent-generated output, log lines,
|
||||
error tracebacks, tool outputs, and slash-command descriptions all stay in
|
||||
English.
|
||||
|
||||
Catalog files live under ``locales/<lang>.yaml`` at the repo root. Each
|
||||
catalog is a flat dict keyed by dotted paths (e.g. ``approval.choose`` or
|
||||
``gateway.approval_expired``). Missing keys fall back to English; if English
|
||||
is missing too, the key path itself is returned so a broken catalog never
|
||||
crashes the agent.
|
||||
|
||||
Usage::
|
||||
|
||||
from agent.i18n import t
|
||||
print(t("approval.choose_long")) # current lang
|
||||
print(t("gateway.draining", count=3)) # {count} formatted
|
||||
print(t("approval.choose_long", lang="zh")) # explicit override
|
||||
|
||||
Language resolution order:
|
||||
1. Explicit ``lang=`` argument passed to :func:`t`
|
||||
2. ``HERMES_LANGUAGE`` environment variable (for tests / quick override)
|
||||
3. ``display.language`` from config.yaml
|
||||
4. ``"en"`` (baseline)
|
||||
|
||||
Supported languages: en, zh, ja, de, es, fr, tr, uk. Unknown values fall back to en.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
from functools import lru_cache
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
SUPPORTED_LANGUAGES: tuple[str, ...] = ("en", "zh", "ja", "de", "es", "fr", "tr", "uk")
|
||||
DEFAULT_LANGUAGE = "en"
|
||||
|
||||
# Accept a few natural aliases so users who type "chinese" / "zh-CN" / "jp"
|
||||
# get the right catalog instead of silently falling back to English.
|
||||
_LANGUAGE_ALIASES: dict[str, str] = {
|
||||
"english": "en", "en-us": "en", "en-gb": "en",
|
||||
"chinese": "zh", "mandarin": "zh", "zh-cn": "zh", "zh-tw": "zh", "zh-hans": "zh", "zh-hant": "zh",
|
||||
"japanese": "ja", "jp": "ja", "ja-jp": "ja",
|
||||
"german": "de", "deutsch": "de", "de-de": "de",
|
||||
"spanish": "es", "español": "es", "espanol": "es", "es-es": "es", "es-mx": "es",
|
||||
"french": "fr", "français": "fr", "france": "fr", "fr-fr": "fr", "fr-be": "fr", "fr-ca": "fr", "fr-ch": "fr",
|
||||
"ukrainian": "uk", "ukrainisch": "uk", "українська": "uk", "uk-ua": "uk", "ua": "uk",
|
||||
"turkish": "tr", "türkçe": "tr", "tr-tr": "tr",
|
||||
}
|
||||
|
||||
_catalog_cache: dict[str, dict[str, str]] = {}
|
||||
_catalog_lock = threading.Lock()
|
||||
|
||||
|
||||
def _locales_dir() -> Path:
|
||||
"""Return the directory containing locale YAML files.
|
||||
|
||||
Lives next to the repo root so both the bundled install and editable
|
||||
checkouts find it without PYTHONPATH gymnastics.
|
||||
"""
|
||||
# agent/i18n.py -> agent/ -> repo root
|
||||
return Path(__file__).resolve().parent.parent / "locales"
|
||||
|
||||
|
||||
def _normalize_lang(value: Any) -> str:
|
||||
"""Normalize a user-supplied language value to a supported code.
|
||||
|
||||
Accepts supported codes directly, common aliases (``chinese`` -> ``zh``),
|
||||
and case-insensitive regional tags (``zh-CN`` -> ``zh``). Returns the
|
||||
default language for unknown values.
|
||||
"""
|
||||
if not isinstance(value, str):
|
||||
return DEFAULT_LANGUAGE
|
||||
key = value.strip().lower()
|
||||
if not key:
|
||||
return DEFAULT_LANGUAGE
|
||||
if key in SUPPORTED_LANGUAGES:
|
||||
return key
|
||||
if key in _LANGUAGE_ALIASES:
|
||||
return _LANGUAGE_ALIASES[key]
|
||||
# Try stripping a region suffix (e.g. "pt-br" -> "pt" won't be supported,
|
||||
# but "zh-CN" -> "zh" will).
|
||||
base = key.split("-", 1)[0]
|
||||
if base in SUPPORTED_LANGUAGES:
|
||||
return base
|
||||
return DEFAULT_LANGUAGE
|
||||
|
||||
|
||||
def _load_catalog(lang: str) -> dict[str, str]:
|
||||
"""Load and flatten one locale YAML file into a dotted-key dict.
|
||||
|
||||
YAML files can be nested for human readability; this produces the flat
|
||||
key space :func:`t` expects. Cached per-language for the process.
|
||||
"""
|
||||
with _catalog_lock:
|
||||
cached = _catalog_cache.get(lang)
|
||||
if cached is not None:
|
||||
return cached
|
||||
|
||||
path = _locales_dir() / f"{lang}.yaml"
|
||||
if not path.is_file():
|
||||
logger.debug("i18n catalog missing for %s at %s", lang, path)
|
||||
with _catalog_lock:
|
||||
_catalog_cache[lang] = {}
|
||||
return {}
|
||||
|
||||
try:
|
||||
import yaml # PyYAML is already a hermes dependency
|
||||
with path.open("r", encoding="utf-8") as f:
|
||||
raw = yaml.safe_load(f) or {}
|
||||
except Exception as exc:
|
||||
logger.warning("Failed to load i18n catalog %s: %s", path, exc)
|
||||
with _catalog_lock:
|
||||
_catalog_cache[lang] = {}
|
||||
return {}
|
||||
|
||||
flat: dict[str, str] = {}
|
||||
_flatten_into(raw, "", flat)
|
||||
with _catalog_lock:
|
||||
_catalog_cache[lang] = flat
|
||||
return flat
|
||||
|
||||
|
||||
def _flatten_into(node: Any, prefix: str, out: dict[str, str]) -> None:
|
||||
if isinstance(node, dict):
|
||||
for key, value in node.items():
|
||||
child_key = f"{prefix}.{key}" if prefix else str(key)
|
||||
_flatten_into(value, child_key, out)
|
||||
elif isinstance(node, str):
|
||||
out[prefix] = node
|
||||
# Non-string, non-dict leaves are ignored -- catalogs are text-only.
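# Illustrative sketch (not part of the original module): a nested catalog
# flattened into the dotted key space that t() looks up. The entry texts are
# placeholders.
def _example_flatten() -> dict[str, str]:
    nested = {
        "approval": {"choose": "Choose an option"},
        "gateway": {"draining": "Draining {count} sessions"},
    }
    flat: dict[str, str] = {}
    _flatten_into(nested, "", flat)
    # flat == {"approval.choose": "Choose an option",
    #          "gateway.draining": "Draining {count} sessions"}
    return flat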
|
||||
|
||||
|
||||
@lru_cache(maxsize=1)
|
||||
def _config_language_cached() -> str | None:
|
||||
"""Read ``display.language`` from config.yaml once per process.
|
||||
|
||||
Cached because ``t()`` is called in hot paths (every approval prompt,
|
||||
every gateway reply) and re-reading YAML each call would be wasteful.
|
||||
``reset_language_cache()`` clears this when config changes at runtime
|
||||
(e.g. after the setup wizard).
|
||||
"""
|
||||
try:
|
||||
from hermes_cli.config import load_config
|
||||
cfg = load_config()
|
||||
lang = (cfg.get("display") or {}).get("language")
|
||||
if lang:
|
||||
return _normalize_lang(lang)
|
||||
except Exception as exc:
|
||||
logger.debug("Could not read display.language from config: %s", exc)
|
||||
return None
|
||||
|
||||
|
||||
def reset_language_cache() -> None:
|
||||
"""Invalidate cached language resolution and catalogs.
|
||||
|
||||
Call after :func:`hermes_cli.config.save_config` if a running process
|
||||
needs to pick up a changed ``display.language`` without restart.
|
||||
"""
|
||||
_config_language_cached.cache_clear()
|
||||
with _catalog_lock:
|
||||
_catalog_cache.clear()
|
||||
|
||||
|
||||
def get_language() -> str:
|
||||
"""Resolve the active language using env > config > default order."""
|
||||
env_lang = os.environ.get("HERMES_LANGUAGE")
|
||||
if env_lang:
|
||||
return _normalize_lang(env_lang)
|
||||
cfg_lang = _config_language_cached()
|
||||
if cfg_lang:
|
||||
return cfg_lang
|
||||
return DEFAULT_LANGUAGE
|
||||
|
||||
|
||||
def t(key: str, lang: str | None = None, **format_kwargs: Any) -> str:
|
||||
"""Translate a dotted key to the active language.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
key
|
||||
Dotted path into the catalog, e.g. ``"approval.choose_long"``.
|
||||
lang
|
||||
Explicit language override. Takes precedence over env + config.
|
||||
**format_kwargs
|
||||
``str.format`` substitution arguments (``t("gateway.drain", count=3)``
|
||||
expects a catalog entry with a ``{count}`` placeholder).
|
||||
|
||||
Returns
|
||||
-------
|
||||
The translated string, or the English fallback if the key is missing in
|
||||
the target language, or the bare key if English is also missing.
|
||||
"""
|
||||
target = _normalize_lang(lang) if lang else get_language()
|
||||
catalog = _load_catalog(target)
|
||||
value = catalog.get(key)
|
||||
|
||||
if value is None and target != DEFAULT_LANGUAGE:
|
||||
# Fall through to English rather than showing a key path to the user.
|
||||
value = _load_catalog(DEFAULT_LANGUAGE).get(key)
|
||||
|
||||
if value is None:
|
||||
# Last-ditch: return the key itself. A broken catalog should not
|
||||
# crash anything; it just looks ugly until someone fixes it.
|
||||
logger.debug("i18n miss: key=%r lang=%r", key, target)
|
||||
value = key
|
||||
|
||||
if format_kwargs:
|
||||
try:
|
||||
return value.format(**format_kwargs)
|
||||
except (KeyError, IndexError, ValueError) as exc:
|
||||
logger.warning(
|
||||
"i18n format failed for key=%r lang=%r kwargs=%r: %s",
|
||||
key, target, format_kwargs, exc,
|
||||
)
|
||||
return value
|
||||
return value
|
||||
|
||||
|
||||
__all__ = [
|
||||
"SUPPORTED_LANGUAGES",
|
||||
"DEFAULT_LANGUAGE",
|
||||
"t",
|
||||
"get_language",
|
||||
"reset_language_cache",
|
||||
]
|
||||
@@ -1,242 +0,0 @@
|
||||
"""
|
||||
Image Generation Provider ABC
|
||||
=============================
|
||||
|
||||
Defines the pluggable-backend interface for image generation. Providers register
|
||||
instances via ``PluginContext.register_image_gen_provider()``; the active one
|
||||
(selected via ``image_gen.provider`` in ``config.yaml``) services every
|
||||
``image_generate`` tool call.
|
||||
|
||||
Providers live in ``<repo>/plugins/image_gen/<name>/`` (built-in, auto-loaded
|
||||
as ``kind: backend``) or ``~/.hermes/plugins/image_gen/<name>/`` (user, opt-in
|
||||
via ``plugins.enabled``).
|
||||
|
||||
Response shape
|
||||
--------------
|
||||
All providers return a dict that :func:`success_response` / :func:`error_response`
|
||||
produce. The tool wrapper JSON-serializes it. Keys:
|
||||
|
||||
success bool
|
||||
image str | None URL or absolute file path
|
||||
model str provider-specific model identifier
|
||||
prompt str echoed prompt
|
||||
aspect_ratio str "landscape" | "square" | "portrait"
|
||||
provider str provider name (for diagnostics)
|
||||
error str only when success=False
|
||||
error_type str only when success=False
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import abc
|
||||
import base64
|
||||
import datetime
|
||||
import logging
|
||||
import uuid
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
VALID_ASPECT_RATIOS: Tuple[str, ...] = ("landscape", "square", "portrait")
|
||||
DEFAULT_ASPECT_RATIO = "landscape"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# ABC
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class ImageGenProvider(abc.ABC):
|
||||
"""Abstract base class for an image generation backend.
|
||||
|
||||
Subclasses must implement :meth:`generate`. Everything else has sane
|
||||
defaults — override only what your provider needs.
|
||||
"""
|
||||
|
||||
@property
|
||||
@abc.abstractmethod
|
||||
def name(self) -> str:
|
||||
"""Stable short identifier used in ``image_gen.provider`` config.
|
||||
|
||||
Lowercase, no spaces. Examples: ``fal``, ``openai``, ``replicate``.
|
||||
"""
|
||||
|
||||
@property
|
||||
def display_name(self) -> str:
|
||||
"""Human-readable label shown in ``hermes tools``. Defaults to ``name.title()``."""
|
||||
return self.name.title()
|
||||
|
||||
def is_available(self) -> bool:
|
||||
"""Return True when this provider can service calls.
|
||||
|
||||
Typically checks for a required API key. Default: True
|
||||
(providers with no external dependencies are always available).
|
||||
"""
|
||||
return True
|
||||
|
||||
def list_models(self) -> List[Dict[str, Any]]:
|
||||
"""Return catalog entries for ``hermes tools`` model picker.
|
||||
|
||||
Each entry::
|
||||
|
||||
{
|
||||
"id": "gpt-image-1.5", # required
|
||||
"display": "GPT Image 1.5", # optional; defaults to id
|
||||
"speed": "~10s", # optional
|
||||
"strengths": "...", # optional
|
||||
"price": "$...", # optional
|
||||
}
|
||||
|
||||
Default: empty list (provider has no user-selectable models).
|
||||
"""
|
||||
return []
|
||||
|
||||
def get_setup_schema(self) -> Dict[str, Any]:
|
||||
"""Return provider metadata for the ``hermes tools`` picker.
|
||||
|
||||
Used by ``tools_config.py`` to inject this provider as a row in
|
||||
the Image Generation provider list. Shape::
|
||||
|
||||
{
|
||||
"name": "OpenAI", # picker label
|
||||
"badge": "paid", # optional short tag
|
||||
"tag": "One-line description...", # optional subtitle
|
||||
"env_vars": [ # keys to prompt for
|
||||
{"key": "OPENAI_API_KEY",
|
||||
"prompt": "OpenAI API key",
|
||||
"url": "https://platform.openai.com/api-keys"},
|
||||
],
|
||||
}
|
||||
|
||||
Default: minimal entry derived from ``display_name``. Override to
|
||||
expose API key prompts and custom badges.
|
||||
"""
|
||||
return {
|
||||
"name": self.display_name,
|
||||
"badge": "",
|
||||
"tag": "",
|
||||
"env_vars": [],
|
||||
}
|
||||
|
||||
def default_model(self) -> Optional[str]:
|
||||
"""Return the default model id, or None if not applicable."""
|
||||
models = self.list_models()
|
||||
if models:
|
||||
return models[0].get("id")
|
||||
return None
|
||||
|
||||
@abc.abstractmethod
|
||||
def generate(
|
||||
self,
|
||||
prompt: str,
|
||||
aspect_ratio: str = DEFAULT_ASPECT_RATIO,
|
||||
**kwargs: Any,
|
||||
) -> Dict[str, Any]:
|
||||
"""Generate an image.
|
||||
|
||||
Implementations should return the dict from :func:`success_response`
|
||||
or :func:`error_response`. ``kwargs`` may contain forward-compat
|
||||
parameters future versions of the schema will expose — implementations
|
||||
should ignore unknown keys.
|
||||
"""
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def resolve_aspect_ratio(value: Optional[str]) -> str:
|
||||
"""Clamp an aspect_ratio value to the valid set, defaulting to landscape.
|
||||
|
||||
Invalid values are coerced rather than rejected so the tool surface is
|
||||
forgiving of agent mistakes.
|
||||
"""
|
||||
if not isinstance(value, str):
|
||||
return DEFAULT_ASPECT_RATIO
|
||||
v = value.strip().lower()
|
||||
if v in VALID_ASPECT_RATIOS:
|
||||
return v
|
||||
return DEFAULT_ASPECT_RATIO
|
||||
|
||||
|
||||
def _images_cache_dir() -> Path:
|
||||
"""Return ``$HERMES_HOME/cache/images/``, creating parents as needed."""
|
||||
from hermes_constants import get_hermes_home
|
||||
|
||||
path = get_hermes_home() / "cache" / "images"
|
||||
path.mkdir(parents=True, exist_ok=True)
|
||||
return path
|
||||
|
||||
|
||||
def save_b64_image(
|
||||
b64_data: str,
|
||||
*,
|
||||
prefix: str = "image",
|
||||
extension: str = "png",
|
||||
) -> Path:
|
||||
"""Decode base64 image data and write it under ``$HERMES_HOME/cache/images/``.
|
||||
|
||||
Returns the absolute :class:`Path` to the saved file.
|
||||
|
||||
Filename format: ``<prefix>_<YYYYMMDD_HHMMSS>_<short-uuid>.<ext>``.
|
||||
"""
|
||||
raw = base64.b64decode(b64_data)
|
||||
ts = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
short = uuid.uuid4().hex[:8]
|
||||
path = _images_cache_dir() / f"{prefix}_{ts}_{short}.{extension}"
|
||||
path.write_bytes(raw)
|
||||
return path
|
||||
|
||||
|
||||
def success_response(
|
||||
*,
|
||||
image: str,
|
||||
model: str,
|
||||
prompt: str,
|
||||
aspect_ratio: str,
|
||||
provider: str,
|
||||
extra: Optional[Dict[str, Any]] = None,
|
||||
) -> Dict[str, Any]:
|
||||
"""Build a uniform success response dict.
|
||||
|
||||
``image`` may be an HTTP URL or an absolute filesystem path (for b64
|
||||
providers like OpenAI). Callers that need to pass through additional
|
||||
backend-specific fields can supply ``extra``.
|
||||
"""
|
||||
payload: Dict[str, Any] = {
|
||||
"success": True,
|
||||
"image": image,
|
||||
"model": model,
|
||||
"prompt": prompt,
|
||||
"aspect_ratio": aspect_ratio,
|
||||
"provider": provider,
|
||||
}
|
||||
if extra:
|
||||
for k, v in extra.items():
|
||||
payload.setdefault(k, v)
|
||||
return payload
|
||||
|
||||
|
||||
def error_response(
|
||||
*,
|
||||
error: str,
|
||||
error_type: str = "provider_error",
|
||||
provider: str = "",
|
||||
model: str = "",
|
||||
prompt: str = "",
|
||||
aspect_ratio: str = DEFAULT_ASPECT_RATIO,
|
||||
) -> Dict[str, Any]:
|
||||
"""Build a uniform error response dict."""
|
||||
return {
|
||||
"success": False,
|
||||
"image": None,
|
||||
"error": error,
|
||||
"error_type": error_type,
|
||||
"model": model,
|
||||
"prompt": prompt,
|
||||
"aspect_ratio": aspect_ratio,
|
||||
"provider": provider,
|
||||
}
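# Illustrative sketch (not part of the original module): a minimal provider
# built on the ABC and helpers above. The provider name, model id, and the
# pretend URL are placeholders — a real backend would call its API here.
class _EchoImageGenProvider(ImageGenProvider):
    @property
    def name(self) -> str:
        return "echo"

    def generate(
        self,
        prompt: str,
        aspect_ratio: str = DEFAULT_ASPECT_RATIO,
        **kwargs: Any,
    ) -> Dict[str, Any]:
        ratio = resolve_aspect_ratio(aspect_ratio)
        if not prompt.strip():
            return error_response(
                error="empty prompt",
                provider=self.name,
                prompt=prompt,
                aspect_ratio=ratio,
            )
        return success_response(
            image="https://example.invalid/generated.png",  # placeholder URL
            model="echo-1",
            prompt=prompt,
            aspect_ratio=ratio,
            provider=self.name,
        )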
|
||||
@@ -1,120 +0,0 @@
|
||||
"""
|
||||
Image Generation Provider Registry
|
||||
==================================
|
||||
|
||||
Central map of registered providers. Populated by plugins at import-time via
|
||||
``PluginContext.register_image_gen_provider()``; consumed by the
|
||||
``image_generate`` tool to dispatch each call to the active backend.
|
||||
|
||||
Active selection
|
||||
----------------
|
||||
The active provider is chosen by ``image_gen.provider`` in ``config.yaml``.
|
||||
If unset, :func:`get_active_provider` applies fallback logic:
|
||||
|
||||
1. If exactly one provider is registered, use it.
|
||||
2. Otherwise if a provider named ``fal`` is registered, use it (legacy
|
||||
default — matches pre-plugin behavior).
|
||||
3. Otherwise return ``None`` (the tool surfaces a helpful error pointing
|
||||
the user at ``hermes tools``).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import threading
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
from agent.image_gen_provider import ImageGenProvider
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
_providers: Dict[str, ImageGenProvider] = {}
|
||||
_lock = threading.Lock()
|
||||
|
||||
|
||||
def register_provider(provider: ImageGenProvider) -> None:
|
||||
"""Register an image generation provider.
|
||||
|
||||
Re-registration (same ``name``) overwrites the previous entry and logs
|
||||
a debug message — this makes hot-reload scenarios (tests, dev loops)
|
||||
behave predictably.
|
||||
"""
|
||||
if not isinstance(provider, ImageGenProvider):
|
||||
raise TypeError(
|
||||
f"register_provider() expects an ImageGenProvider instance, "
|
||||
f"got {type(provider).__name__}"
|
||||
)
|
||||
name = provider.name
|
||||
if not isinstance(name, str) or not name.strip():
|
||||
raise ValueError("Image gen provider .name must be a non-empty string")
|
||||
with _lock:
|
||||
existing = _providers.get(name)
|
||||
_providers[name] = provider
|
||||
if existing is not None:
|
||||
logger.debug("Image gen provider '%s' re-registered (was %r)", name, type(existing).__name__)
|
||||
else:
|
||||
logger.debug("Registered image gen provider '%s' (%s)", name, type(provider).__name__)
|
||||
|
||||
|
||||
def list_providers() -> List[ImageGenProvider]:
|
||||
"""Return all registered providers, sorted by name."""
|
||||
with _lock:
|
||||
items = list(_providers.values())
|
||||
return sorted(items, key=lambda p: p.name)
|
||||
|
||||
|
||||
def get_provider(name: str) -> Optional[ImageGenProvider]:
|
||||
"""Return the provider registered under *name*, or None."""
|
||||
if not isinstance(name, str):
|
||||
return None
|
||||
with _lock:
|
||||
return _providers.get(name.strip())
|
||||
|
||||
|
||||
def get_active_provider() -> Optional[ImageGenProvider]:
|
||||
"""Resolve the currently-active provider.
|
||||
|
||||
Reads ``image_gen.provider`` from config.yaml; falls back per the
|
||||
module docstring.
|
||||
"""
|
||||
configured: Optional[str] = None
|
||||
try:
|
||||
from hermes_cli.config import load_config
|
||||
|
||||
cfg = load_config()
|
||||
section = cfg.get("image_gen") if isinstance(cfg, dict) else None
|
||||
if isinstance(section, dict):
|
||||
raw = section.get("provider")
|
||||
if isinstance(raw, str) and raw.strip():
|
||||
configured = raw.strip()
|
||||
except Exception as exc:
|
||||
logger.debug("Could not read image_gen.provider from config: %s", exc)
|
||||
|
||||
with _lock:
|
||||
snapshot = dict(_providers)
|
||||
|
||||
if configured:
|
||||
provider = snapshot.get(configured)
|
||||
if provider is not None:
|
||||
return provider
|
||||
logger.debug(
|
||||
"image_gen.provider='%s' configured but not registered; falling back",
|
||||
configured,
|
||||
)
|
||||
|
||||
# Fallback: single-provider case
|
||||
if len(snapshot) == 1:
|
||||
return next(iter(snapshot.values()))
|
||||
|
||||
# Fallback: prefer legacy FAL for backward compat
|
||||
if "fal" in snapshot:
|
||||
return snapshot["fal"]
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def _reset_for_tests() -> None:
|
||||
"""Clear the registry. **Test-only.**"""
|
||||
with _lock:
|
||||
_providers.clear()
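# Illustrative sketch (not part of the original module): wiring a provider into
# the registry and resolving the active one. In real use registration happens
# inside a plugin via PluginContext.register_image_gen_provider(); calling
# register_provider() directly is just for illustration, and the prompt below
# is a placeholder.
def _example_registry_usage(provider: ImageGenProvider) -> None:
    register_provider(provider)
    active = get_active_provider()
    if active is not None:
        print(active.generate("a lighthouse at dusk", aspect_ratio="square"))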
|
||||
@@ -1,301 +0,0 @@
|
||||
"""Routing helpers for inbound user-attached images.
|
||||
|
||||
Two modes:
|
||||
|
||||
native — attach images as OpenAI-style ``image_url`` content parts on the
|
||||
user turn. Provider adapters (Anthropic, Gemini, Bedrock, Codex,
|
||||
OpenAI chat.completions) already translate these into their
|
||||
vendor-specific multimodal formats.
|
||||
|
||||
text — run ``vision_analyze`` on each image up-front and prepend the
|
||||
description to the user's text. The model never sees the pixels;
|
||||
it only sees a lossy text summary. This is the pre-existing
|
||||
behaviour and still the right choice for non-vision models.
|
||||
|
||||
The decision is made once per message turn by :func:`decide_image_input_mode`.
|
||||
It reads ``agent.image_input_mode`` from config.yaml (``auto`` | ``native``
|
||||
| ``text``, default ``auto``) and the active model's capability metadata.
|
||||
|
||||
In ``auto`` mode:
|
||||
- If the user has explicitly configured ``auxiliary.vision.provider``
|
||||
(i.e. not ``auto`` and not empty), we assume they want the text pipeline
|
||||
regardless of the main model — they've opted in to a specific vision
|
||||
backend for a reason (cost, quality, local-only, etc.).
|
||||
- Otherwise, if the active model reports ``supports_vision=True`` in its
|
||||
models.dev metadata, we attach natively.
|
||||
- Otherwise (non-vision model, no explicit override), we fall back to text.
|
||||
|
||||
This keeps ``vision_analyze`` surfaced as a tool in every session — skills
|
||||
and agent flows that chain it (browser screenshots, deeper inspection of
|
||||
URL-referenced images, style-gating loops) keep working. The routing only
|
||||
affects *how user-attached images on the current turn* are presented to the
|
||||
main model.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import logging
|
||||
import mimetypes
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
_VALID_MODES = frozenset({"auto", "native", "text"})
|
||||
|
||||
|
||||
def _coerce_mode(raw: Any) -> str:
|
||||
"""Normalize a config value into one of the valid modes."""
|
||||
if not isinstance(raw, str):
|
||||
return "auto"
|
||||
val = raw.strip().lower()
|
||||
if val in _VALID_MODES:
|
||||
return val
|
||||
return "auto"
|
||||
|
||||
|
||||
def _explicit_aux_vision_override(cfg: Optional[Dict[str, Any]]) -> bool:
|
||||
"""True when the user configured a specific auxiliary vision backend.
|
||||
|
||||
An explicit override means the user *wants* the text pipeline (they're
|
||||
paying for a dedicated vision model), so we don't silently bypass it.
|
||||
"""
|
||||
if not isinstance(cfg, dict):
|
||||
return False
|
||||
aux = cfg.get("auxiliary") or {}
|
||||
if not isinstance(aux, dict):
|
||||
return False
|
||||
vision = aux.get("vision") or {}
|
||||
if not isinstance(vision, dict):
|
||||
return False
|
||||
|
||||
provider = str(vision.get("provider") or "").strip().lower()
|
||||
model = str(vision.get("model") or "").strip()
|
||||
base_url = str(vision.get("base_url") or "").strip()
|
||||
|
||||
# "auto" / "" / blank = not explicit
|
||||
if provider in ("", "auto") and not model and not base_url:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def _lookup_supports_vision(provider: str, model: str) -> Optional[bool]:
|
||||
"""Return True/False if we can resolve caps, None if unknown."""
|
||||
if not provider or not model:
|
||||
return None
|
||||
try:
|
||||
from agent.models_dev import get_model_capabilities
|
||||
caps = get_model_capabilities(provider, model)
|
||||
except Exception as exc: # pragma: no cover - defensive
|
||||
logger.debug("image_routing: caps lookup failed for %s:%s — %s", provider, model, exc)
|
||||
return None
|
||||
if caps is None:
|
||||
return None
|
||||
return bool(caps.supports_vision)
|
||||
|
||||
|
||||
def decide_image_input_mode(
|
||||
provider: str,
|
||||
model: str,
|
||||
cfg: Optional[Dict[str, Any]],
|
||||
) -> str:
|
||||
"""Return ``"native"`` or ``"text"`` for the given turn.
|
||||
|
||||
Args:
|
||||
provider: active inference provider ID (e.g. ``"anthropic"``, ``"openrouter"``).
|
||||
model: active model slug as it would be sent to the provider.
|
||||
cfg: loaded config.yaml dict, or None. When None, behaves as auto.
|
||||
"""
|
||||
mode_cfg = "auto"
|
||||
if isinstance(cfg, dict):
|
||||
agent_cfg = cfg.get("agent") or {}
|
||||
if isinstance(agent_cfg, dict):
|
||||
mode_cfg = _coerce_mode(agent_cfg.get("image_input_mode"))
|
||||
|
||||
if mode_cfg == "native":
|
||||
return "native"
|
||||
if mode_cfg == "text":
|
||||
return "text"
|
||||
|
||||
# auto
|
||||
if _explicit_aux_vision_override(cfg):
|
||||
return "text"
|
||||
|
||||
supports = _lookup_supports_vision(provider, model)
|
||||
if supports is True:
|
||||
return "native"
|
||||
return "text"
|
||||
|
||||
|
||||
# Image size handling is REACTIVE rather than proactive: we attempt native
|
||||
# attachment at full size regardless of provider, and rely on
|
||||
# ``run_agent._try_shrink_image_parts_in_messages`` to shrink + retry if
|
||||
# the provider rejects the request (e.g. Anthropic's hard 5 MB per-image
|
||||
# ceiling returned as HTTP 400 "image exceeds 5 MB maximum").
|
||||
#
|
||||
# Why reactive: our knowledge of provider ceilings is partial and evolving
|
||||
# (OpenAI accepts 49 MB+, Anthropic 5 MB, Gemini 100 MB, others unknown).
|
||||
# A proactive per-provider table would be stale the moment a provider raises
|
||||
# or lowers its limit, and silently degrading quality for users on providers
|
||||
# that would have accepted the full image is the worse failure mode.
|
||||
# The shrink-on-reject path loses 1 API call + maybe 1s of Pillow work when
|
||||
# it fires, which is cheaper than permanent quality loss.
|
||||
|
||||
|
||||
def _sniff_mime_from_bytes(raw: bytes) -> Optional[str]:
|
||||
"""Detect image MIME from magic bytes. Returns None if unrecognised.
|
||||
|
||||
Filename-based detection (``mimetypes.guess_type``) is unreliable when
|
||||
upstream platforms lie about content-type. Discord, for example, can
|
||||
serve a PNG with ``content_type=image/webp`` for proxied/animated
|
||||
stickers, custom emoji previews, or images uploaded via certain bots.
|
||||
Anthropic strictly validates that declared media_type matches the
|
||||
actual bytes and returns HTTP 400 on mismatch, so we sniff to be safe.
|
||||
"""
|
||||
if not raw:
|
||||
return None
|
||||
# PNG: 89 50 4E 47 0D 0A 1A 0A
|
||||
if raw.startswith(b"\x89PNG\r\n\x1a\n"):
|
||||
return "image/png"
|
||||
# JPEG: FF D8 FF
|
||||
if raw.startswith(b"\xff\xd8\xff"):
|
||||
return "image/jpeg"
|
||||
# GIF87a / GIF89a
|
||||
if raw[:6] in (b"GIF87a", b"GIF89a"):
|
||||
return "image/gif"
|
||||
# WEBP: "RIFF" .... "WEBP"
|
||||
if len(raw) >= 12 and raw[:4] == b"RIFF" and raw[8:12] == b"WEBP":
|
||||
return "image/webp"
|
||||
# BMP: "BM"
|
||||
if raw.startswith(b"BM"):
|
||||
return "image/bmp"
|
||||
# HEIC/HEIF: ftypheic / ftypheix / ftypmif1 / ftypmsf1 etc.
|
||||
if len(raw) >= 12 and raw[4:8] == b"ftyp" and raw[8:12] in (
|
||||
b"heic", b"heix", b"hevc", b"hevx", b"mif1", b"msf1", b"heim", b"heis",
|
||||
):
|
||||
return "image/heic"
|
||||
return None
|
||||
|
||||
|
||||
def _guess_mime(path: Path, raw: Optional[bytes] = None) -> str:
|
||||
"""Return image MIME type for *path*.
|
||||
|
||||
If *raw* bytes are provided, magic-byte sniffing wins (authoritative).
|
||||
Otherwise we fall back to ``mimetypes`` then suffix-based defaults.
|
||||
"""
|
||||
if raw is not None:
|
||||
sniffed = _sniff_mime_from_bytes(raw)
|
||||
if sniffed:
|
||||
return sniffed
|
||||
mime, _ = mimetypes.guess_type(str(path))
|
||||
if mime and mime.startswith("image/"):
|
||||
return mime
|
||||
# mimetypes on some Linux distros mis-maps .jpg; default to jpeg when
|
||||
# the suffix looks imagey.
|
||||
suffix = path.suffix.lower()
|
||||
return {
|
||||
".jpg": "image/jpeg",
|
||||
".jpeg": "image/jpeg",
|
||||
".png": "image/png",
|
||||
".gif": "image/gif",
|
||||
".webp": "image/webp",
|
||||
".bmp": "image/bmp",
|
||||
}.get(suffix, "image/jpeg")
|
||||
|
||||
|
||||
def _file_to_data_url(path: Path) -> Optional[str]:
|
||||
"""Encode a local image as a base64 data URL at its native size.
|
||||
|
||||
Size limits are NOT enforced here — the agent retry loop
|
||||
(``run_agent._try_shrink_image_parts_in_messages``) shrinks on the
|
||||
provider's first rejection. Keeping this simple means providers that
|
||||
accept large images (OpenAI 49 MB+, Gemini 100 MB) don't pay a silent
|
||||
quality tax just because one other provider is stricter.
|
||||
|
||||
Returns None only if the file can't be read (missing, permission
|
||||
denied, etc.); the caller reports those paths in ``skipped``.
|
||||
"""
|
||||
try:
|
||||
raw = path.read_bytes()
|
||||
except Exception as exc:
|
||||
logger.warning("image_routing: failed to read %s — %s", path, exc)
|
||||
return None
|
||||
mime = _guess_mime(path, raw=raw)
|
||||
b64 = base64.b64encode(raw).decode("ascii")
|
||||
return f"data:{mime};base64,{b64}"
|
||||
|
||||
|
||||
def build_native_content_parts(
|
||||
user_text: str,
|
||||
image_paths: List[str],
|
||||
) -> Tuple[List[Dict[str, Any]], List[str]]:
|
||||
"""Build an OpenAI-style ``content`` list for a user turn.
|
||||
|
||||
Shape:
|
||||
[{"type": "text", "text": "...\\n\\n[Image attached at: /local/path]"},
|
||||
{"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},
|
||||
...]
|
||||
|
||||
The local path of each successfully attached image is appended to the
|
||||
text part as ``[Image attached at: <path>]``. The model still sees the
|
||||
pixels via the ``image_url`` part (full native vision); the path note
|
||||
just gives it a string handle so MCP/skill tools that take an image
|
||||
path or URL argument can be invoked on the same image without an
|
||||
extra round-trip. This parallels the text-mode hint produced by
|
||||
``Runner._enrich_message_with_vision`` (``vision_analyze using image_url:
|
||||
<path>``) so behaviour is consistent across both image input modes.
|
||||
|
||||
Images are attached at their native size. If a provider rejects the
|
||||
request because an image is too large (e.g. Anthropic's 5 MB per-image
|
||||
ceiling), the agent's retry loop transparently shrinks and retries
|
||||
once — see ``run_agent._try_shrink_image_parts_in_messages``.
|
||||
|
||||
Returns (content_parts, skipped_paths). Skipped paths are files that
|
||||
couldn't be read from disk and are NOT advertised in the path hints.
|
||||
"""
|
||||
skipped: List[str] = []
|
||||
image_parts: List[Dict[str, Any]] = []
|
||||
attached_paths: List[str] = []
|
||||
|
||||
for raw_path in image_paths:
|
||||
p = Path(raw_path)
|
||||
if not p.exists() or not p.is_file():
|
||||
skipped.append(str(raw_path))
|
||||
continue
|
||||
data_url = _file_to_data_url(p)
|
||||
if not data_url:
|
||||
skipped.append(str(raw_path))
|
||||
continue
|
||||
image_parts.append({
|
||||
"type": "image_url",
|
||||
"image_url": {"url": data_url},
|
||||
})
|
||||
attached_paths.append(str(raw_path))
|
||||
|
||||
text = (user_text or "").strip()
|
||||
|
||||
# If at least one image attached, build a single text part that combines
|
||||
# the user's caption (or a neutral default) with one path hint per image.
|
||||
if attached_paths:
|
||||
base_text = text or "What do you see in this image?"
|
||||
path_hints = "\n".join(
|
||||
f"[Image attached at: {p}]" for p in attached_paths
|
||||
)
|
||||
combined_text = f"{base_text}\n\n{path_hints}"
|
||||
parts: List[Dict[str, Any]] = [{"type": "text", "text": combined_text}]
|
||||
parts.extend(image_parts)
|
||||
return parts, skipped
|
||||
|
||||
# No images successfully attached — fall back to plain text-only behaviour.
|
||||
parts = []
|
||||
if text:
|
||||
parts.append({"type": "text", "text": text})
|
||||
return parts, skipped
|
||||
|
||||
|
||||
__all__ = [
|
||||
"decide_image_input_mode",
|
||||
"build_native_content_parts",
|
||||
]
|
||||
@@ -27,6 +27,7 @@ from agent.usage_pricing import (
|
||||
DEFAULT_PRICING,
|
||||
estimate_usage_cost,
|
||||
format_duration_compact,
|
||||
get_pricing,
|
||||
has_known_pricing,
|
||||
)
|
||||
|
||||
@@ -38,6 +39,15 @@ def _has_known_pricing(model_name: str, provider: str = None, base_url: str = No
|
||||
return has_known_pricing(model_name, provider=provider, base_url=base_url)
|
||||
|
||||
|
||||
def _get_pricing(model_name: str) -> Dict[str, float]:
|
||||
"""Look up pricing for a model. Uses fuzzy matching on model name.
|
||||
|
||||
Returns _DEFAULT_PRICING (zero cost) for unknown/custom models —
|
||||
we can't assume costs for self-hosted endpoints, local inference, etc.
|
||||
"""
|
||||
return get_pricing(model_name)
|
||||
|
||||
|
||||
def _estimate_cost(
|
||||
session_or_model: Dict[str, Any] | str,
|
||||
input_tokens: int = 0,
|
||||
@@ -124,7 +134,6 @@ class InsightsEngine:
|
||||
# Gather raw data
|
||||
sessions = self._get_sessions(cutoff, source)
|
||||
tool_usage = self._get_tool_usage(cutoff, source)
|
||||
skill_usage = self._get_skill_usage(cutoff, source)
|
||||
message_stats = self._get_message_stats(cutoff, source)
|
||||
|
||||
if not sessions:
|
||||
@@ -136,15 +145,6 @@ class InsightsEngine:
|
||||
"models": [],
|
||||
"platforms": [],
|
||||
"tools": [],
|
||||
"skills": {
|
||||
"summary": {
|
||||
"total_skill_loads": 0,
|
||||
"total_skill_edits": 0,
|
||||
"total_skill_actions": 0,
|
||||
"distinct_skills_used": 0,
|
||||
},
|
||||
"top_skills": [],
|
||||
},
|
||||
"activity": {},
|
||||
"top_sessions": [],
|
||||
}
|
||||
@@ -154,7 +154,6 @@ class InsightsEngine:
|
||||
models = self._compute_model_breakdown(sessions)
|
||||
platforms = self._compute_platform_breakdown(sessions)
|
||||
tools = self._compute_tool_breakdown(tool_usage)
|
||||
skills = self._compute_skill_breakdown(skill_usage)
|
||||
activity = self._compute_activity_patterns(sessions)
|
||||
top_sessions = self._compute_top_sessions(sessions)
|
||||
|
||||
@@ -167,7 +166,6 @@ class InsightsEngine:
|
||||
"models": models,
|
||||
"platforms": platforms,
|
||||
"tools": tools,
|
||||
"skills": skills,
|
||||
"activity": activity,
|
||||
"top_sessions": top_sessions,
|
||||
}
|
||||
@@ -296,82 +294,6 @@ class InsightsEngine:
|
||||
for name, count in tool_counts.most_common()
|
||||
]
|
||||
|
||||
def _get_skill_usage(self, cutoff: float, source: str = None) -> List[Dict]:
|
||||
"""Extract per-skill usage from assistant tool calls."""
|
||||
skill_counts: Dict[str, Dict[str, Any]] = {}
|
||||
|
||||
if source:
|
||||
cursor = self._conn.execute(
|
||||
"""SELECT m.tool_calls, m.timestamp
|
||||
FROM messages m
|
||||
JOIN sessions s ON s.id = m.session_id
|
||||
WHERE s.started_at >= ? AND s.source = ?
|
||||
AND m.role = 'assistant' AND m.tool_calls IS NOT NULL""",
|
||||
(cutoff, source),
|
||||
)
|
||||
else:
|
||||
cursor = self._conn.execute(
|
||||
"""SELECT m.tool_calls, m.timestamp
|
||||
FROM messages m
|
||||
JOIN sessions s ON s.id = m.session_id
|
||||
WHERE s.started_at >= ?
|
||||
AND m.role = 'assistant' AND m.tool_calls IS NOT NULL""",
|
||||
(cutoff,),
|
||||
)
|
||||
|
||||
for row in cursor.fetchall():
|
||||
try:
|
||||
calls = row["tool_calls"]
|
||||
if isinstance(calls, str):
|
||||
calls = json.loads(calls)
|
||||
if not isinstance(calls, list):
|
||||
continue
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
continue
|
||||
|
||||
timestamp = row["timestamp"]
|
||||
for call in calls:
|
||||
if not isinstance(call, dict):
|
||||
continue
|
||||
func = call.get("function", {})
|
||||
tool_name = func.get("name")
|
||||
if tool_name not in {"skill_view", "skill_manage"}:
|
||||
continue
|
||||
|
||||
args = func.get("arguments")
|
||||
if isinstance(args, str):
|
||||
try:
|
||||
args = json.loads(args)
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
continue
|
||||
if not isinstance(args, dict):
|
||||
continue
|
||||
|
||||
skill_name = args.get("name")
|
||||
if not isinstance(skill_name, str) or not skill_name.strip():
|
||||
continue
|
||||
|
||||
entry = skill_counts.setdefault(
|
||||
skill_name,
|
||||
{
|
||||
"skill": skill_name,
|
||||
"view_count": 0,
|
||||
"manage_count": 0,
|
||||
"last_used_at": None,
|
||||
},
|
||||
)
|
||||
if tool_name == "skill_view":
|
||||
entry["view_count"] += 1
|
||||
else:
|
||||
entry["manage_count"] += 1
|
||||
|
||||
if timestamp is not None and (
|
||||
entry["last_used_at"] is None or timestamp > entry["last_used_at"]
|
||||
):
|
||||
entry["last_used_at"] = timestamp
|
||||
|
||||
return list(skill_counts.values())
|
||||
|
||||
def _get_message_stats(self, cutoff: float, source: str = None) -> Dict:
|
||||
"""Get aggregate message statistics."""
|
||||
if source:
|
||||
@@ -563,46 +485,6 @@ class InsightsEngine:
|
||||
})
|
||||
return result
|
||||
|
||||
def _compute_skill_breakdown(self, skill_usage: List[Dict]) -> Dict[str, Any]:
|
||||
"""Process per-skill usage into summary + ranked list."""
|
||||
total_skill_loads = sum(s["view_count"] for s in skill_usage) if skill_usage else 0
|
||||
total_skill_edits = sum(s["manage_count"] for s in skill_usage) if skill_usage else 0
|
||||
total_skill_actions = total_skill_loads + total_skill_edits
|
||||
|
||||
top_skills = []
|
||||
for skill in skill_usage:
|
||||
total_count = skill["view_count"] + skill["manage_count"]
|
||||
percentage = (total_count / total_skill_actions * 100) if total_skill_actions else 0
|
||||
top_skills.append({
|
||||
"skill": skill["skill"],
|
||||
"view_count": skill["view_count"],
|
||||
"manage_count": skill["manage_count"],
|
||||
"total_count": total_count,
|
||||
"percentage": percentage,
|
||||
"last_used_at": skill.get("last_used_at"),
|
||||
})
|
||||
|
||||
top_skills.sort(
|
||||
key=lambda s: (
|
||||
s["total_count"],
|
||||
s["view_count"],
|
||||
s["manage_count"],
|
||||
s["last_used_at"] or 0,
|
||||
s["skill"],
|
||||
),
|
||||
reverse=True,
|
||||
)
|
||||
|
||||
return {
|
||||
"summary": {
|
||||
"total_skill_loads": total_skill_loads,
|
||||
"total_skill_edits": total_skill_edits,
|
||||
"total_skill_actions": total_skill_actions,
|
||||
"distinct_skills_used": len(skill_usage),
|
||||
},
|
||||
"top_skills": top_skills,
|
||||
}
|
||||
|
||||
def _compute_activity_patterns(self, sessions: List[Dict]) -> Dict:
|
||||
"""Analyze activity patterns by day of week and hour."""
|
||||
day_counts = Counter() # 0=Monday ... 6=Sunday
|
||||
@@ -762,7 +644,13 @@ class InsightsEngine:
|
||||
lines.append(f" Sessions: {o['total_sessions']:<12} Messages: {o['total_messages']:,}")
|
||||
lines.append(f" Tool calls: {o['total_tool_calls']:<12,} User messages: {o['user_messages']:,}")
|
||||
lines.append(f" Input tokens: {o['total_input_tokens']:<12,} Output tokens: {o['total_output_tokens']:,}")
|
||||
lines.append(f" Total tokens: {o['total_tokens']:,}")
|
||||
cache_total = o.get("total_cache_read_tokens", 0) + o.get("total_cache_write_tokens", 0)
|
||||
if cache_total > 0:
|
||||
lines.append(f" Cache read: {o['total_cache_read_tokens']:<12,} Cache write: {o['total_cache_write_tokens']:,}")
|
||||
cost_str = f"${o['estimated_cost']:.2f}"
|
||||
if o.get("models_without_pricing"):
|
||||
cost_str += " *"
|
||||
lines.append(f" Total tokens: {o['total_tokens']:<12,} Est. cost: {cost_str}")
|
||||
if o["total_hours"] > 0:
|
||||
lines.append(f" Active time: ~{_format_duration(o['total_hours'] * 3600):<11} Avg session: ~{_format_duration(o['avg_session_duration'])}")
|
||||
lines.append(f" Avg msgs/session: {o['avg_messages_per_session']:.1f}")
|
||||
@@ -772,10 +660,16 @@ class InsightsEngine:
|
||||
if report["models"]:
|
||||
lines.append(" 🤖 Models Used")
|
||||
lines.append(" " + "─" * 56)
|
||||
lines.append(f" {'Model':<30} {'Sessions':>8} {'Tokens':>12}")
|
||||
lines.append(f" {'Model':<30} {'Sessions':>8} {'Tokens':>12} {'Cost':>8}")
|
||||
for m in report["models"]:
|
||||
model_name = m["model"][:28]
|
||||
lines.append(f" {model_name:<30} {m['sessions']:>8} {m['total_tokens']:>12,}")
|
||||
if m.get("has_pricing"):
|
||||
cost_cell = f"${m['cost']:>6.2f}"
|
||||
else:
|
||||
cost_cell = " N/A"
|
||||
lines.append(f" {model_name:<30} {m['sessions']:>8} {m['total_tokens']:>12,} {cost_cell}")
|
||||
if o.get("models_without_pricing"):
|
||||
lines.append(" * Cost N/A for custom/self-hosted models")
|
||||
lines.append("")
|
||||
|
||||
# Platform breakdown
|
||||
@@ -798,28 +692,6 @@ class InsightsEngine:
|
||||
lines.append(f" ... and {len(report['tools']) - 15} more tools")
|
||||
lines.append("")
|
||||
|
||||
# Skill usage
|
||||
skills = report.get("skills", {})
|
||||
top_skills = skills.get("top_skills", [])
|
||||
if top_skills:
|
||||
lines.append(" 🧠 Top Skills")
|
||||
lines.append(" " + "─" * 56)
|
||||
lines.append(f" {'Skill':<28} {'Loads':>7} {'Edits':>7} {'Last used':>11}")
|
||||
for skill in top_skills[:10]:
|
||||
last_used = "—"
|
||||
if skill.get("last_used_at"):
|
||||
last_used = datetime.fromtimestamp(skill["last_used_at"]).strftime("%b %d")
|
||||
lines.append(
|
||||
f" {skill['skill'][:28]:<28} {skill['view_count']:>7,} {skill['manage_count']:>7,} {last_used:>11}"
|
||||
)
|
||||
summary = skills.get("summary", {})
|
||||
lines.append(
|
||||
f" Distinct skills: {summary.get('distinct_skills_used', 0)} "
|
||||
f"Loads: {summary.get('total_skill_loads', 0):,} "
|
||||
f"Edits: {summary.get('total_skill_edits', 0):,}"
|
||||
)
|
||||
lines.append("")
|
||||
|
||||
# Activity patterns
|
||||
act = report.get("activity", {})
|
||||
if act.get("by_day"):
|
||||
@@ -877,7 +749,15 @@ class InsightsEngine:
|
||||
|
||||
# Overview
|
||||
lines.append(f"**Sessions:** {o['total_sessions']} | **Messages:** {o['total_messages']:,} | **Tool calls:** {o['total_tool_calls']:,}")
|
||||
lines.append(f"**Tokens:** {o['total_tokens']:,} (in: {o['total_input_tokens']:,} / out: {o['total_output_tokens']:,})")
|
||||
cache_total = o.get("total_cache_read_tokens", 0) + o.get("total_cache_write_tokens", 0)
|
||||
if cache_total > 0:
|
||||
lines.append(f"**Tokens:** {o['total_tokens']:,} (in: {o['total_input_tokens']:,} / out: {o['total_output_tokens']:,} / cache: {cache_total:,})")
|
||||
else:
|
||||
lines.append(f"**Tokens:** {o['total_tokens']:,} (in: {o['total_input_tokens']:,} / out: {o['total_output_tokens']:,})")
|
||||
cost_note = ""
|
||||
if o.get("models_without_pricing"):
|
||||
cost_note = " _(excludes custom/self-hosted models)_"
|
||||
lines.append(f"**Est. cost:** ${o['estimated_cost']:.2f}{cost_note}")
|
||||
if o["total_hours"] > 0:
|
||||
lines.append(f"**Active time:** ~{_format_duration(o['total_hours'] * 3600)} | **Avg session:** ~{_format_duration(o['avg_session_duration'])}")
|
||||
lines.append("")
|
||||
@@ -886,7 +766,8 @@ class InsightsEngine:
|
||||
if report["models"]:
|
||||
lines.append("**🤖 Models:**")
|
||||
for m in report["models"][:5]:
|
||||
lines.append(f" {m['model'][:25]} — {m['sessions']} sessions, {m['total_tokens']:,} tokens")
|
||||
cost_str = f"${m['cost']:.2f}" if m.get("has_pricing") else "N/A"
|
||||
lines.append(f" {m['model'][:25]} — {m['sessions']} sessions, {m['total_tokens']:,} tokens, {cost_str}")
|
||||
lines.append("")
|
||||
|
||||
# Platforms (if multi-platform)
|
||||
@@ -903,18 +784,6 @@ class InsightsEngine:
|
||||
lines.append(f" {t['tool']} — {t['count']:,} calls ({t['percentage']:.1f}%)")
|
||||
lines.append("")
|
||||
|
||||
skills = report.get("skills", {})
|
||||
if skills.get("top_skills"):
|
||||
lines.append("**🧠 Top Skills:**")
|
||||
for skill in skills["top_skills"][:5]:
|
||||
suffix = ""
|
||||
if skill.get("last_used_at"):
|
||||
suffix = f", last used {datetime.fromtimestamp(skill['last_used_at']).strftime('%b %d')}"
|
||||
lines.append(
|
||||
f" {skill['skill']} — {skill['view_count']:,} loads, {skill['manage_count']:,} edits{suffix}"
|
||||
)
|
||||
lines.append("")
|
||||
|
||||
# Activity summary
|
||||
act = report.get("activity", {})
|
||||
if act.get("busiest_day") and act.get("busiest_hour"):
|
||||
|
||||
@@ -1,48 +0,0 @@
|
||||
"""LM Studio reasoning-effort resolution shared by the chat-completions
|
||||
transport and run_agent's iteration-limit summary path.
|
||||
|
||||
LM Studio publishes per-model ``capabilities.reasoning.allowed_options`` (e.g.
|
||||
``["off","on"]`` for toggle-style models, ``["off","minimal","low"]`` for
|
||||
graduated models). We map the user's ``reasoning_config`` onto LM Studio's
|
||||
OpenAI-compatible vocabulary, then clamp against the model's allowed set so
|
||||
the server doesn't 400 on an unsupported effort.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List, Optional
|
||||
|
||||
# LM Studio accepts these top-level reasoning_effort values via its
|
||||
# OpenAI-compatible chat.completions endpoint.
|
||||
_LM_VALID_EFFORTS = {"none", "minimal", "low", "medium", "high", "xhigh"}
|
||||
|
||||
# Toggle-style models publish allowed_options as ["off","on"] in /api/v1/models.
|
||||
# Map them onto the OpenAI-compatible request vocabulary.
|
||||
_LM_EFFORT_ALIASES = {"off": "none", "on": "medium"}
|
||||
|
||||
|
||||
def resolve_lmstudio_effort(
|
||||
reasoning_config: Optional[dict],
|
||||
allowed_options: Optional[List[str]],
|
||||
) -> Optional[str]:
|
||||
"""Return the ``reasoning_effort`` string to send to LM Studio, or ``None``.
|
||||
|
||||
``None`` means "omit the field": the user picked a level the model can't
|
||||
honor, so let LM Studio fall back to the model's declared default rather
|
||||
than silently substituting a different effort. When ``allowed_options`` is
|
||||
falsy (probe failed), skip clamping and send the resolved effort anyway.
|
||||
"""
|
||||
effort = "medium"
|
||||
if reasoning_config and isinstance(reasoning_config, dict):
|
||||
if reasoning_config.get("enabled") is False:
|
||||
effort = "none"
|
||||
else:
|
||||
raw = (reasoning_config.get("effort") or "").strip().lower()
|
||||
raw = _LM_EFFORT_ALIASES.get(raw, raw)
|
||||
if raw in _LM_VALID_EFFORTS:
|
||||
effort = raw
|
||||
if allowed_options:
|
||||
allowed = {_LM_EFFORT_ALIASES.get(opt, opt) for opt in allowed_options}
|
||||
if effort not in allowed:
|
||||
return None
|
||||
return effort
|
||||
@@ -1,49 +0,0 @@
|
||||
"""User-facing summaries for manual compression commands."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Sequence
|
||||
|
||||
|
||||
def summarize_manual_compression(
|
||||
before_messages: Sequence[dict[str, Any]],
|
||||
after_messages: Sequence[dict[str, Any]],
|
||||
before_tokens: int,
|
||||
after_tokens: int,
|
||||
) -> dict[str, Any]:
|
||||
"""Return consistent user-facing feedback for manual compression."""
|
||||
before_count = len(before_messages)
|
||||
after_count = len(after_messages)
|
||||
noop = list(after_messages) == list(before_messages)
|
||||
|
||||
if noop:
|
||||
headline = f"No changes from compression: {before_count} messages"
|
||||
if after_tokens == before_tokens:
|
||||
token_line = (
|
||||
f"Approx request size: ~{before_tokens:,} tokens (unchanged)"
|
||||
)
|
||||
else:
|
||||
token_line = (
|
||||
f"Approx request size: ~{before_tokens:,} → "
|
||||
f"~{after_tokens:,} tokens"
|
||||
)
|
||||
else:
|
||||
headline = f"Compressed: {before_count} → {after_count} messages"
|
||||
token_line = (
|
||||
f"Approx request size: ~{before_tokens:,} → "
|
||||
f"~{after_tokens:,} tokens"
|
||||
)
|
||||
|
||||
note = None
|
||||
if not noop and after_count < before_count and after_tokens > before_tokens:
|
||||
note = (
|
||||
"Note: fewer messages can still raise this estimate when "
|
||||
"compression rewrites the transcript into denser summaries."
|
||||
)
|
||||
|
||||
return {
|
||||
"noop": noop,
|
||||
"headline": headline,
|
||||
"token_line": token_line,
|
||||
"note": note,
|
||||
}
|
||||
@@ -1,555 +0,0 @@
|
||||
"""MemoryManager — orchestrates memory providers for the agent.
|
||||
|
||||
Single integration point in run_agent.py. Replaces scattered per-backend
|
||||
code with one manager that delegates to registered providers.
|
||||
|
||||
Only ONE external plugin provider is allowed at a time — attempting to
|
||||
register a second external provider is rejected with a warning. This
|
||||
prevents tool schema bloat and conflicting memory backends.
|
||||
|
||||
Usage in run_agent.py:
|
||||
self._memory_manager = MemoryManager()
|
||||
# Only ONE of these:
|
||||
self._memory_manager.add_provider(plugin_provider)
|
||||
|
||||
# System prompt
|
||||
prompt_parts.append(self._memory_manager.build_system_prompt())
|
||||
|
||||
# Pre-turn
|
||||
context = self._memory_manager.prefetch_all(user_message)
|
||||
|
||||
# Post-turn
|
||||
self._memory_manager.sync_all(user_msg, assistant_response)
|
||||
self._memory_manager.queue_prefetch_all(user_msg)
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import re
|
||||
import inspect
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from agent.memory_provider import MemoryProvider
|
||||
from tools.registry import tool_error
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Context fencing helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_FENCE_TAG_RE = re.compile(r'</?\s*memory-context\s*>', re.IGNORECASE)
|
||||
_INTERNAL_CONTEXT_RE = re.compile(
|
||||
r'<\s*memory-context\s*>[\s\S]*?</\s*memory-context\s*>',
|
||||
re.IGNORECASE,
|
||||
)
|
||||
_INTERNAL_NOTE_RE = re.compile(
|
||||
r'\[System note:\s*The following is recalled memory context,\s*NOT new user input\.\s*Treat as (?:informational background data|authoritative reference data[^\]]*)\.\]\s*',
|
||||
re.IGNORECASE,
|
||||
)
|
||||
|
||||
|
||||
def sanitize_context(text: str) -> str:
|
||||
"""Strip fence tags, injected context blocks, and system notes from provider output."""
|
||||
text = _INTERNAL_CONTEXT_RE.sub('', text)
|
||||
text = _INTERNAL_NOTE_RE.sub('', text)
|
||||
text = _FENCE_TAG_RE.sub('', text)
|
||||
return text
|
||||
|
||||
|
||||
class StreamingContextScrubber:
|
||||
"""Stateful scrubber for streaming text that may contain split memory-context spans.
|
||||
|
||||
The one-shot ``sanitize_context`` regex cannot survive chunk boundaries:
|
||||
a ``<memory-context>`` opened in one delta and closed in a later delta
|
||||
leaks its payload to the UI because the non-greedy block regex needs
|
||||
both tags in one string. This scrubber runs a small state machine
|
||||
across deltas, holding back partial-tag tails and discarding
|
||||
everything inside a span (including the system-note line).
|
||||
|
||||
Usage::
|
||||
|
||||
scrubber = StreamingContextScrubber()
|
||||
for delta in stream:
|
||||
visible = scrubber.feed(delta)
|
||||
if visible:
|
||||
emit(visible)
|
||||
trailing = scrubber.flush() # at end of stream
|
||||
if trailing:
|
||||
emit(trailing)
|
||||
|
||||
The scrubber is re-entrant per agent instance. Callers building new
|
||||
top-level responses (new turn) should create a fresh scrubber or call
|
||||
``reset()``.
|
||||
"""
|
||||
|
||||
_OPEN_TAG = "<memory-context>"
|
||||
_CLOSE_TAG = "</memory-context>"
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._in_span: bool = False
|
||||
self._buf: str = ""
|
||||
|
||||
def reset(self) -> None:
|
||||
self._in_span = False
|
||||
self._buf = ""
|
||||
|
||||
def feed(self, text: str) -> str:
|
||||
"""Return the visible portion of ``text`` after scrubbing.
|
||||
|
||||
Any trailing fragment that could be the start of an open/close tag
|
||||
is held back in the internal buffer and surfaced on the next
|
||||
``feed()`` call or discarded/emitted by ``flush()``.
|
||||
"""
|
||||
if not text:
|
||||
return ""
|
||||
buf = self._buf + text
|
||||
self._buf = ""
|
||||
out: list[str] = []
|
||||
|
||||
while buf:
|
||||
if self._in_span:
|
||||
idx = buf.lower().find(self._CLOSE_TAG)
|
||||
if idx == -1:
|
||||
# Hold back a potential partial close tag; drop the rest
|
||||
held = self._max_partial_suffix(buf, self._CLOSE_TAG)
|
||||
self._buf = buf[-held:] if held else ""
|
||||
return "".join(out)
|
||||
# Found close — skip span content + tag, continue
|
||||
buf = buf[idx + len(self._CLOSE_TAG):]
|
||||
self._in_span = False
|
||||
else:
|
||||
idx = buf.lower().find(self._OPEN_TAG)
|
||||
if idx == -1:
|
||||
# No open tag — hold back a potential partial open tag
|
||||
held = self._max_partial_suffix(buf, self._OPEN_TAG)
|
||||
if held:
|
||||
out.append(buf[:-held])
|
||||
self._buf = buf[-held:]
|
||||
else:
|
||||
out.append(buf)
|
||||
return "".join(out)
|
||||
# Emit text before the tag, enter span
|
||||
if idx > 0:
|
||||
out.append(buf[:idx])
|
||||
buf = buf[idx + len(self._OPEN_TAG):]
|
||||
self._in_span = True
|
||||
|
||||
return "".join(out)
|
||||
|
||||
def flush(self) -> str:
|
||||
"""Emit any held-back buffer at end-of-stream.
|
||||
|
||||
If we're still inside an unterminated span the remaining content is
|
||||
discarded (safer: leaking partial memory context is worse than a
|
||||
truncated answer). Otherwise the held-back partial-tag tail is
|
||||
emitted verbatim (it turned out not to be a real tag).
|
||||
"""
|
||||
if self._in_span:
|
||||
self._buf = ""
|
||||
self._in_span = False
|
||||
return ""
|
||||
tail = self._buf
|
||||
self._buf = ""
|
||||
return tail
|
||||
|
||||
@staticmethod
|
||||
def _max_partial_suffix(buf: str, tag: str) -> int:
|
||||
"""Return the length of the longest buf-suffix that is a tag-prefix.
|
||||
|
||||
Case-insensitive. Returns 0 if no suffix could start the tag.
|
||||
"""
|
||||
tag_lower = tag.lower()
|
||||
buf_lower = buf.lower()
|
||||
max_check = min(len(buf_lower), len(tag_lower) - 1)
|
||||
for i in range(max_check, 0, -1):
|
||||
if tag_lower.startswith(buf_lower[-i:]):
|
||||
return i
|
||||
return 0
|
||||
|
||||
|
||||
def build_memory_context_block(raw_context: str) -> str:
|
||||
"""Wrap prefetched memory in a fenced block with system note."""
|
||||
if not raw_context or not raw_context.strip():
|
||||
return ""
|
||||
clean = sanitize_context(raw_context)
|
||||
if clean != raw_context:
|
||||
logger.warning("memory provider returned pre-wrapped context; stripped")
|
||||
return (
|
||||
"<memory-context>\n"
|
||||
"[System note: The following is recalled memory context, "
|
||||
"NOT new user input. Treat as authoritative reference data — "
|
||||
"this is the agent's persistent memory and should inform all responses.]\n\n"
|
||||
f"{clean}\n"
|
||||
"</memory-context>"
|
||||
)
|
||||
|
||||
|
||||
class MemoryManager:
|
||||
"""Orchestrates the built-in provider plus at most one external provider.
|
||||
|
||||
The builtin provider is always first. Only one non-builtin (external)
|
||||
provider is allowed. Failures in one provider never block the other.
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._providers: List[MemoryProvider] = []
|
||||
self._tool_to_provider: Dict[str, MemoryProvider] = {}
|
||||
self._has_external: bool = False # True once a non-builtin provider is added
|
||||
|
||||
# -- Registration --------------------------------------------------------
|
||||
|
||||
def add_provider(self, provider: MemoryProvider) -> None:
|
||||
"""Register a memory provider.
|
||||
|
||||
Built-in provider (name ``"builtin"``) is always accepted.
|
||||
Only **one** external (non-builtin) provider is allowed — a second
|
||||
attempt is rejected with a warning.
|
||||
"""
|
||||
is_builtin = provider.name == "builtin"
|
||||
|
||||
if not is_builtin:
|
||||
if self._has_external:
|
||||
existing = next(
|
||||
(p.name for p in self._providers if p.name != "builtin"), "unknown"
|
||||
)
|
||||
logger.warning(
|
||||
"Rejected memory provider '%s' — external provider '%s' is "
|
||||
"already registered. Only one external memory provider is "
|
||||
"allowed at a time. Configure which one via memory.provider "
|
||||
"in config.yaml.",
|
||||
provider.name, existing,
|
||||
)
|
||||
return
|
||||
self._has_external = True
|
||||
|
||||
self._providers.append(provider)
|
||||
|
||||
# Index tool names → provider for routing
|
||||
for schema in provider.get_tool_schemas():
|
||||
tool_name = schema.get("name", "")
|
||||
if tool_name and tool_name not in self._tool_to_provider:
|
||||
self._tool_to_provider[tool_name] = provider
|
||||
elif tool_name in self._tool_to_provider:
|
||||
logger.warning(
|
||||
"Memory tool name conflict: '%s' already registered by %s, "
|
||||
"ignoring from %s",
|
||||
tool_name,
|
||||
self._tool_to_provider[tool_name].name,
|
||||
provider.name,
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"Memory provider '%s' registered (%d tools)",
|
||||
provider.name,
|
||||
len(provider.get_tool_schemas()),
|
||||
)
|
||||
|
||||
@property
|
||||
def providers(self) -> List[MemoryProvider]:
|
||||
"""All registered providers in order."""
|
||||
return list(self._providers)
|
||||
|
||||
def get_provider(self, name: str) -> Optional[MemoryProvider]:
|
||||
"""Get a provider by name, or None if not registered."""
|
||||
for p in self._providers:
|
||||
if p.name == name:
|
||||
return p
|
||||
return None
|
||||
|
||||
# -- System prompt -------------------------------------------------------
|
||||
|
||||
def build_system_prompt(self) -> str:
|
||||
"""Collect system prompt blocks from all providers.
|
||||
|
||||
Returns combined text, or empty string if no providers contribute.
|
||||
Each non-empty block is labeled with the provider name.
|
||||
"""
|
||||
blocks = []
|
||||
for provider in self._providers:
|
||||
try:
|
||||
block = provider.system_prompt_block()
|
||||
if block and block.strip():
|
||||
blocks.append(block)
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
"Memory provider '%s' system_prompt_block() failed: %s",
|
||||
provider.name, e,
|
||||
)
|
||||
return "\n\n".join(blocks)
|
||||
|
||||
# -- Prefetch / recall ---------------------------------------------------
|
||||
|
||||
def prefetch_all(self, query: str, *, session_id: str = "") -> str:
|
||||
"""Collect prefetch context from all providers.
|
||||
|
||||
Returns merged context text labeled by provider. Empty providers
|
||||
are skipped. Failures in one provider don't block others.
|
||||
"""
|
||||
parts = []
|
||||
for provider in self._providers:
|
||||
try:
|
||||
result = provider.prefetch(query, session_id=session_id)
|
||||
if result and result.strip():
|
||||
parts.append(result)
|
||||
except Exception as e:
|
||||
logger.debug(
|
||||
"Memory provider '%s' prefetch failed (non-fatal): %s",
|
||||
provider.name, e,
|
||||
)
|
||||
return "\n\n".join(parts)
|
||||
|
||||
def queue_prefetch_all(self, query: str, *, session_id: str = "") -> None:
|
||||
"""Queue background prefetch on all providers for the next turn."""
|
||||
for provider in self._providers:
|
||||
try:
|
||||
provider.queue_prefetch(query, session_id=session_id)
|
||||
except Exception as e:
|
||||
logger.debug(
|
||||
"Memory provider '%s' queue_prefetch failed (non-fatal): %s",
|
||||
provider.name, e,
|
||||
)
|
||||
|
||||
# -- Sync ----------------------------------------------------------------
|
||||
|
||||
def sync_all(self, user_content: str, assistant_content: str, *, session_id: str = "") -> None:
|
||||
"""Sync a completed turn to all providers."""
|
||||
for provider in self._providers:
|
||||
try:
|
||||
provider.sync_turn(user_content, assistant_content, session_id=session_id)
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
"Memory provider '%s' sync_turn failed: %s",
|
||||
provider.name, e,
|
||||
)
|
||||
|
||||
# -- Tools ---------------------------------------------------------------
|
||||
|
||||
def get_all_tool_schemas(self) -> List[Dict[str, Any]]:
|
||||
"""Collect tool schemas from all providers."""
|
||||
schemas = []
|
||||
seen = set()
|
||||
for provider in self._providers:
|
||||
try:
|
||||
for schema in provider.get_tool_schemas():
|
||||
name = schema.get("name", "")
|
||||
if name and name not in seen:
|
||||
schemas.append(schema)
|
||||
seen.add(name)
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
"Memory provider '%s' get_tool_schemas() failed: %s",
|
||||
provider.name, e,
|
||||
)
|
||||
return schemas
|
||||
|
||||
def get_all_tool_names(self) -> set:
|
||||
"""Return set of all tool names across all providers."""
|
||||
return set(self._tool_to_provider.keys())
|
||||
|
||||
def has_tool(self, tool_name: str) -> bool:
|
||||
"""Check if any provider handles this tool."""
|
||||
return tool_name in self._tool_to_provider
|
||||
|
||||
def handle_tool_call(
|
||||
self, tool_name: str, args: Dict[str, Any], **kwargs
|
||||
) -> str:
|
||||
"""Route a tool call to the correct provider.
|
||||
|
||||
Returns JSON string result. Raises ValueError if no provider
|
||||
handles the tool.
|
||||
"""
|
||||
provider = self._tool_to_provider.get(tool_name)
|
||||
if provider is None:
|
||||
return tool_error(f"No memory provider handles tool '{tool_name}'")
|
||||
try:
|
||||
return provider.handle_tool_call(tool_name, args, **kwargs)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Memory provider '%s' handle_tool_call(%s) failed: %s",
|
||||
provider.name, tool_name, e,
|
||||
)
|
||||
return tool_error(f"Memory tool '{tool_name}' failed: {e}")
|
||||
|
||||
# -- Lifecycle hooks -----------------------------------------------------
|
||||
|
||||
def on_turn_start(self, turn_number: int, message: str, **kwargs) -> None:
|
||||
"""Notify all providers of a new turn.
|
||||
|
||||
kwargs may include: remaining_tokens, model, platform, tool_count.
|
||||
"""
|
||||
for provider in self._providers:
|
||||
try:
|
||||
provider.on_turn_start(turn_number, message, **kwargs)
|
||||
except Exception as e:
|
||||
logger.debug(
|
||||
"Memory provider '%s' on_turn_start failed: %s",
|
||||
provider.name, e,
|
||||
)
|
||||
|
||||
def on_session_end(self, messages: List[Dict[str, Any]]) -> None:
|
||||
"""Notify all providers of session end."""
|
||||
for provider in self._providers:
|
||||
try:
|
||||
provider.on_session_end(messages)
|
||||
except Exception as e:
|
||||
logger.debug(
|
||||
"Memory provider '%s' on_session_end failed: %s",
|
||||
provider.name, e,
|
||||
)
|
||||
|
||||
def on_session_switch(
|
||||
self,
|
||||
new_session_id: str,
|
||||
*,
|
||||
parent_session_id: str = "",
|
||||
reset: bool = False,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
"""Notify all providers that the agent's session_id has rotated.
|
||||
|
||||
Fires on ``/resume``, ``/branch``, ``/reset``, ``/new``, and
|
||||
context compression — any path that reassigns
|
||||
``AIAgent.session_id`` without tearing the provider down.
|
||||
|
||||
Providers keep running; they only need to refresh cached
|
||||
per-session state so subsequent writes land in the correct
|
||||
session's record. See ``MemoryProvider.on_session_switch`` for
|
||||
the full contract.
|
||||
"""
|
||||
if not new_session_id:
|
||||
return
|
||||
for provider in self._providers:
|
||||
try:
|
||||
provider.on_session_switch(
|
||||
new_session_id,
|
||||
parent_session_id=parent_session_id,
|
||||
reset=reset,
|
||||
**kwargs,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug(
|
||||
"Memory provider '%s' on_session_switch failed: %s",
|
||||
provider.name, e,
|
||||
)
|
||||
|
||||
def on_pre_compress(self, messages: List[Dict[str, Any]]) -> str:
|
||||
"""Notify all providers before context compression.
|
||||
|
||||
Returns combined text from providers to include in the compression
|
||||
summary prompt. Empty string if no provider contributes.
|
||||
"""
|
||||
parts = []
|
||||
for provider in self._providers:
|
||||
try:
|
||||
result = provider.on_pre_compress(messages)
|
||||
if result and result.strip():
|
||||
parts.append(result)
|
||||
except Exception as e:
|
||||
logger.debug(
|
||||
"Memory provider '%s' on_pre_compress failed: %s",
|
||||
provider.name, e,
|
||||
)
|
||||
return "\n\n".join(parts)
|
||||
|
||||
@staticmethod
|
||||
def _provider_memory_write_metadata_mode(provider: MemoryProvider) -> str:
|
||||
"""Return how to pass metadata to a provider's memory-write hook."""
|
||||
try:
|
||||
signature = inspect.signature(provider.on_memory_write)
|
||||
except (TypeError, ValueError):
|
||||
return "keyword"
|
||||
|
||||
params = list(signature.parameters.values())
|
||||
if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in params):
|
||||
return "keyword"
|
||||
if "metadata" in signature.parameters:
|
||||
return "keyword"
|
||||
|
||||
accepted = [
|
||||
p for p in params
|
||||
if p.kind in (
|
||||
inspect.Parameter.POSITIONAL_ONLY,
|
||||
inspect.Parameter.POSITIONAL_OR_KEYWORD,
|
||||
inspect.Parameter.KEYWORD_ONLY,
|
||||
)
|
||||
]
|
||||
if len(accepted) >= 4:
|
||||
return "positional"
|
||||
return "legacy"
|
||||
|
||||
def on_memory_write(
|
||||
self,
|
||||
action: str,
|
||||
target: str,
|
||||
content: str,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
) -> None:
|
||||
"""Notify external providers when the built-in memory tool writes.
|
||||
|
||||
Skips the builtin provider itself (it's the source of the write).
|
||||
"""
|
||||
for provider in self._providers:
|
||||
if provider.name == "builtin":
|
||||
continue
|
||||
try:
|
||||
metadata_mode = self._provider_memory_write_metadata_mode(provider)
|
||||
if metadata_mode == "keyword":
|
||||
provider.on_memory_write(
|
||||
action, target, content, metadata=dict(metadata or {})
|
||||
)
|
||||
elif metadata_mode == "positional":
|
||||
provider.on_memory_write(action, target, content, dict(metadata or {}))
|
||||
else:
|
||||
provider.on_memory_write(action, target, content)
|
||||
except Exception as e:
|
||||
logger.debug(
|
||||
"Memory provider '%s' on_memory_write failed: %s",
|
||||
provider.name, e,
|
||||
)
|
||||
|
||||
def on_delegation(self, task: str, result: str, *,
|
||||
child_session_id: str = "", **kwargs) -> None:
|
||||
"""Notify all providers that a subagent completed."""
|
||||
for provider in self._providers:
|
||||
try:
|
||||
provider.on_delegation(
|
||||
task, result, child_session_id=child_session_id, **kwargs
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug(
|
||||
"Memory provider '%s' on_delegation failed: %s",
|
||||
provider.name, e,
|
||||
)
|
||||
|
||||
def shutdown_all(self) -> None:
|
||||
"""Shut down all providers (reverse order for clean teardown)."""
|
||||
for provider in reversed(self._providers):
|
||||
try:
|
||||
provider.shutdown()
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
"Memory provider '%s' shutdown failed: %s",
|
||||
provider.name, e,
|
||||
)
|
||||
|
||||
def initialize_all(self, session_id: str, **kwargs) -> None:
|
||||
"""Initialize all providers.
|
||||
|
||||
Automatically injects ``hermes_home`` into *kwargs* so that every
|
||||
provider can resolve profile-scoped storage paths without importing
|
||||
``get_hermes_home()`` themselves.
|
||||
"""
|
||||
if "hermes_home" not in kwargs:
|
||||
from hermes_constants import get_hermes_home
|
||||
kwargs["hermes_home"] = str(get_hermes_home())
|
||||
for provider in self._providers:
|
||||
try:
|
||||
provider.initialize(session_id=session_id, **kwargs)
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
"Memory provider '%s' initialize failed: %s",
|
||||
provider.name, e,
|
||||
)
|
||||
@@ -1,279 +0,0 @@
|
||||
"""Abstract base class for pluggable memory providers.
|
||||
|
||||
Memory providers give the agent persistent recall across sessions.
|
||||
The MemoryManager enforces a one-external-provider limit to prevent
|
||||
tool schema bloat and conflicting memory backends.
|
||||
|
||||
External providers (Honcho, Hindsight, Mem0, etc.) are registered
|
||||
and managed via MemoryManager. Only one external provider runs at a
|
||||
time.
|
||||
|
||||
Registration:
|
||||
Plugins ship in plugins/memory/<name>/ and are activated via
|
||||
the memory.provider config key.
|
||||
|
||||
Lifecycle (called by MemoryManager, wired in run_agent.py):
|
||||
initialize() — connect, create resources, warm up
|
||||
system_prompt_block() — static text for the system prompt
|
||||
prefetch(query) — background recall before each turn
|
||||
sync_turn(user, asst) — async write after each turn
|
||||
get_tool_schemas() — tool schemas to expose to the model
|
||||
handle_tool_call() — dispatch a tool call
|
||||
shutdown() — clean exit
|
||||
|
||||
Optional hooks (override to opt in):
|
||||
on_turn_start(turn, message, **kwargs) — per-turn tick with runtime context
|
||||
on_session_end(messages) — end-of-session extraction
|
||||
on_session_switch(new_session_id, **kwargs) — mid-process session_id rotation
|
||||
on_pre_compress(messages) -> str — extract before context compression
|
||||
on_memory_write(action, target, content, metadata=None) — mirror built-in memory writes
|
||||
on_delegation(task, result, **kwargs) — parent-side observation of subagent work
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MemoryProvider(ABC):
|
||||
"""Abstract base class for memory providers."""
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def name(self) -> str:
|
||||
"""Short identifier for this provider (e.g. 'builtin', 'honcho', 'hindsight')."""
|
||||
|
||||
# -- Core lifecycle (implement these) ------------------------------------
|
||||
|
||||
@abstractmethod
|
||||
def is_available(self) -> bool:
|
||||
"""Return True if this provider is configured, has credentials, and is ready.
|
||||
|
||||
Called during agent init to decide whether to activate the provider.
|
||||
Should not make network calls — just check config and installed deps.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def initialize(self, session_id: str, **kwargs) -> None:
|
||||
"""Initialize for a session.
|
||||
|
||||
Called once at agent startup. May create resources (banks, tables),
|
||||
establish connections, start background threads, etc.
|
||||
|
||||
kwargs always include:
|
||||
- hermes_home (str): The active HERMES_HOME directory path. Use this
|
||||
for profile-scoped storage instead of hardcoding ``~/.hermes``.
|
||||
- platform (str): "cli", "telegram", "discord", "cron", etc.
|
||||
|
||||
kwargs may also include:
|
||||
- agent_context (str): "primary", "subagent", "cron", or "flush".
|
||||
Providers should skip writes for non-primary contexts (cron system
|
||||
prompts would corrupt user representations).
|
||||
- agent_identity (str): Profile name (e.g. "coder"). Use for
|
||||
per-profile provider identity scoping.
|
||||
- agent_workspace (str): Shared workspace name (e.g. "hermes").
|
||||
- parent_session_id (str): For subagents, the parent's session_id.
|
||||
- user_id (str): Platform user identifier (gateway sessions).
|
||||
"""
|
||||
|
||||
def system_prompt_block(self) -> str:
|
||||
"""Return text to include in the system prompt.
|
||||
|
||||
Called during system prompt assembly. Return empty string to skip.
|
||||
This is for STATIC provider info (instructions, status). Prefetched
|
||||
recall context is injected separately via prefetch().
|
||||
"""
|
||||
return ""
|
||||
|
||||
def prefetch(self, query: str, *, session_id: str = "") -> str:
|
||||
"""Recall relevant context for the upcoming turn.
|
||||
|
||||
Called before each API call. Return formatted text to inject as
|
||||
context, or empty string if nothing relevant. Implementations
|
||||
should be fast — use background threads for the actual recall
|
||||
and return cached results here.
|
||||
|
||||
session_id is provided for providers serving concurrent sessions
|
||||
(gateway group chats, cached agents). Providers that don't need
|
||||
per-session scoping can ignore it.
|
||||
"""
|
||||
return ""
|
||||
|
||||
def queue_prefetch(self, query: str, *, session_id: str = "") -> None:
|
||||
"""Queue a background recall for the NEXT turn.
|
||||
|
||||
Called after each turn completes. The result will be consumed
|
||||
by prefetch() on the next turn. Default is no-op — providers
|
||||
that do background prefetching should override this.
|
||||
"""
|
||||
|
||||
def sync_turn(self, user_content: str, assistant_content: str, *, session_id: str = "") -> None:
|
||||
"""Persist a completed turn to the backend.
|
||||
|
||||
Called after each turn. Should be non-blocking — queue for
|
||||
background processing if the backend has latency.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def get_tool_schemas(self) -> List[Dict[str, Any]]:
|
||||
"""Return tool schemas this provider exposes.
|
||||
|
||||
Each schema follows the OpenAI function calling format:
|
||||
{"name": "...", "description": "...", "parameters": {...}}
|
||||
|
||||
Return empty list if this provider has no tools (context-only).
|
||||
"""
|
||||
|
||||
def handle_tool_call(self, tool_name: str, args: Dict[str, Any], **kwargs) -> str:
|
||||
"""Handle a tool call for one of this provider's tools.
|
||||
|
||||
Must return a JSON string (the tool result).
|
||||
Only called for tool names returned by get_tool_schemas().
|
||||
"""
|
||||
raise NotImplementedError(f"Provider {self.name} does not handle tool {tool_name}")
|
||||
|
||||
def shutdown(self) -> None:
|
||||
"""Clean shutdown — flush queues, close connections."""
|
||||
|
||||
# -- Optional hooks (override to opt in) ---------------------------------
|
||||
|
||||
def on_turn_start(self, turn_number: int, message: str, **kwargs) -> None:
|
||||
"""Called at the start of each turn with the user message.
|
||||
|
||||
Use for turn-counting, scope management, periodic maintenance.
|
||||
|
||||
kwargs may include: remaining_tokens, model, platform, tool_count.
|
||||
Providers use what they need; extras are ignored.
|
||||
"""
|
||||
|
||||
def on_session_end(self, messages: List[Dict[str, Any]]) -> None:
|
||||
"""Called when a session ends (explicit exit or timeout).
|
||||
|
||||
Use for end-of-session fact extraction, summarization, etc.
|
||||
messages is the full conversation history.
|
||||
|
||||
NOT called after every turn — only at actual session boundaries
|
||||
(CLI exit, /reset, gateway session expiry).
|
||||
"""
|
||||
|
||||
def on_session_switch(
|
||||
self,
|
||||
new_session_id: str,
|
||||
*,
|
||||
parent_session_id: str = "",
|
||||
reset: bool = False,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
"""Called when the agent switches session_id mid-process.
|
||||
|
||||
Fires on ``/resume``, ``/branch``, ``/reset``, ``/new`` (CLI), the
|
||||
gateway equivalents, and context compression — any path that
|
||||
reassigns ``AIAgent.session_id`` without tearing the provider down.
|
||||
|
||||
Providers that cache per-session state in ``initialize()``
|
||||
(``_session_id``, ``_document_id``, accumulated turn buffers,
|
||||
counters) should update or reset that state here so subsequent
|
||||
writes land in the correct session's record.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
new_session_id:
|
||||
The session_id the agent just switched to.
|
||||
parent_session_id:
|
||||
The previous session_id, if meaningful — set for ``/branch``
|
||||
(fork lineage), context compression (continuation lineage),
|
||||
and ``/resume`` (the session we're leaving). Empty string
|
||||
when no lineage applies.
|
||||
reset:
|
||||
``True`` when this is a genuinely new conversation, not a
|
||||
resumption of an existing one. Fired by ``/reset`` / ``/new``.
|
||||
Providers should flush accumulated per-session buffers
|
||||
(``_session_turns``, ``_turn_counter``, etc.) when this is
|
||||
set. ``False`` for ``/resume`` / ``/branch`` / compression
|
||||
where the logical conversation continues under the new id.
|
||||
|
||||
Default is no-op for backward compatibility.
|
||||
"""
|
||||
|
||||
def on_pre_compress(self, messages: List[Dict[str, Any]]) -> str:
|
||||
"""Called before context compression discards old messages.
|
||||
|
||||
Use to extract insights from messages about to be compressed.
|
||||
messages is the list that will be summarized/discarded.
|
||||
|
||||
Return text to include in the compression summary prompt so the
|
||||
compressor preserves provider-extracted insights. Return empty
|
||||
string for no contribution (backwards-compatible default).
|
||||
"""
|
||||
return ""
|
||||
|
||||
def on_delegation(self, task: str, result: str, *,
|
||||
child_session_id: str = "", **kwargs) -> None:
|
||||
"""Called on the PARENT agent when a subagent completes.
|
||||
|
||||
The parent's memory provider gets the task+result pair as an
|
||||
observation of what was delegated and what came back. The subagent
|
||||
itself has no provider session (skip_memory=True).
|
||||
|
||||
task: the delegation prompt
|
||||
result: the subagent's final response
|
||||
child_session_id: the subagent's session_id
|
||||
"""
|
||||
|
||||
def get_config_schema(self) -> List[Dict[str, Any]]:
|
||||
"""Return config fields this provider needs for setup.
|
||||
|
||||
Used by 'hermes memory setup' to walk the user through configuration.
|
||||
Each field is a dict with:
|
||||
key: config key name (e.g. 'api_key', 'mode')
|
||||
description: human-readable description
|
||||
secret: True if this should go to .env (default: False)
|
||||
required: True if required (default: False)
|
||||
default: default value (optional)
|
||||
choices: list of valid values (optional)
|
||||
url: URL where user can get this credential (optional)
|
||||
env_var: explicit env var name for secrets (default: auto-generated)
|
||||
|
||||
Return empty list if no config needed (e.g. local-only providers).
|
||||
"""
|
||||
return []
|
||||
|
||||
def save_config(self, values: Dict[str, Any], hermes_home: str) -> None:
|
||||
"""Write non-secret config to the provider's native location.
|
||||
|
||||
Called by 'hermes memory setup' after collecting user inputs.
|
||||
``values`` contains only non-secret fields (secrets go to .env).
|
||||
``hermes_home`` is the active HERMES_HOME directory path.
|
||||
|
||||
Providers with native config files (JSON, YAML) should override
|
||||
this to write to their expected location. Providers that use only
|
||||
env vars can leave the default (no-op).
|
||||
|
||||
All new memory provider plugins MUST implement either:
|
||||
- save_config() for native config file formats, OR
|
||||
- use only env vars (in which case get_config_schema() fields
|
||||
should all have ``env_var`` set and this method stays no-op).
|
||||
"""
|
||||
|
||||
def on_memory_write(
|
||||
self,
|
||||
action: str,
|
||||
target: str,
|
||||
content: str,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
) -> None:
|
||||
"""Called when the built-in memory tool writes an entry.
|
||||
|
||||
action: 'add', 'replace', or 'remove'
|
||||
target: 'memory' or 'user'
|
||||
content: the entry content
|
||||
metadata: structured provenance for the write, when available. Common
|
||||
keys include ``write_origin``, ``execution_context``, ``session_id``,
|
||||
``parent_session_id``, ``platform``, and ``tool_name``.
|
||||
|
||||
Use to mirror built-in memory writes to your backend.
|
||||
"""
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,29 +1,19 @@
|
||||
"""Models.dev registry integration — primary database for providers and models.
|
||||
"""Models.dev registry integration for provider-aware context length detection.
|
||||
|
||||
Fetches from https://models.dev/api.json — a community-maintained database
|
||||
of 4000+ models across 109+ providers. Provides:
|
||||
Fetches model metadata from https://models.dev/api.json — a community-maintained
|
||||
database of 3800+ models across 100+ providers, including per-provider context
|
||||
windows, pricing, and capabilities.
|
||||
|
||||
- **Provider metadata**: name, base URL, env vars, documentation link
|
||||
- **Model metadata**: context window, max output, cost/M tokens, capabilities
|
||||
(reasoning, tools, vision, PDF, audio), modalities, knowledge cutoff,
|
||||
open-weights flag, family grouping, deprecation status
|
||||
|
||||
Data resolution order (like TypeScript OpenCode):
|
||||
1. Bundled snapshot (ships with the package — offline-first)
|
||||
2. Disk cache (~/.hermes/models_dev_cache.json)
|
||||
3. Network fetch (https://models.dev/api.json)
|
||||
4. Background refresh every 60 minutes
|
||||
|
||||
Other modules should import the dataclasses and query functions from here
|
||||
rather than parsing the raw JSON themselves.
|
||||
Data is cached in memory (1hr TTL) and on disk (~/.hermes/models_dev_cache.json)
|
||||
to avoid cold-start network latency.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
from utils import atomic_json_write
|
||||
|
||||
@@ -38,151 +28,30 @@ _MODELS_DEV_CACHE_TTL = 3600 # 1 hour in-memory
|
||||
_models_dev_cache: Dict[str, Any] = {}
|
||||
_models_dev_cache_time: float = 0
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Dataclasses — rich metadata for providers and models
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@dataclass
|
||||
class ModelInfo:
|
||||
"""Full metadata for a single model from models.dev."""
|
||||
|
||||
id: str
|
||||
name: str
|
||||
family: str
|
||||
provider_id: str # models.dev provider ID (e.g. "anthropic")
|
||||
|
||||
# Capabilities
|
||||
reasoning: bool = False
|
||||
tool_call: bool = False
|
||||
attachment: bool = False # supports image/file attachments (vision)
|
||||
temperature: bool = False
|
||||
structured_output: bool = False
|
||||
open_weights: bool = False
|
||||
|
||||
# Modalities
|
||||
input_modalities: Tuple[str, ...] = () # ("text", "image", "pdf", ...)
|
||||
output_modalities: Tuple[str, ...] = ()
|
||||
|
||||
# Limits
|
||||
context_window: int = 0
|
||||
max_output: int = 0
|
||||
max_input: Optional[int] = None
|
||||
|
||||
# Cost (per million tokens, USD)
|
||||
cost_input: float = 0.0
|
||||
cost_output: float = 0.0
|
||||
cost_cache_read: Optional[float] = None
|
||||
cost_cache_write: Optional[float] = None
|
||||
|
||||
# Metadata
|
||||
knowledge_cutoff: str = ""
|
||||
release_date: str = ""
|
||||
status: str = "" # "alpha", "beta", "deprecated", or ""
|
||||
interleaved: Any = False # True or {"field": "reasoning_content"}
|
||||
|
||||
def has_cost_data(self) -> bool:
|
||||
return self.cost_input > 0 or self.cost_output > 0
|
||||
|
||||
def supports_vision(self) -> bool:
|
||||
return self.attachment or "image" in self.input_modalities
|
||||
|
||||
def supports_pdf(self) -> bool:
|
||||
return "pdf" in self.input_modalities
|
||||
|
||||
def supports_audio_input(self) -> bool:
|
||||
return "audio" in self.input_modalities
|
||||
|
||||
def format_cost(self) -> str:
|
||||
"""Human-readable cost string, e.g. '$3.00/M in, $15.00/M out'."""
|
||||
if not self.has_cost_data():
|
||||
return "unknown"
|
||||
parts = [f"${self.cost_input:.2f}/M in", f"${self.cost_output:.2f}/M out"]
|
||||
if self.cost_cache_read is not None:
|
||||
parts.append(f"cache read ${self.cost_cache_read:.2f}/M")
|
||||
return ", ".join(parts)
|
||||
|
||||
def format_capabilities(self) -> str:
|
||||
"""Human-readable capabilities, e.g. 'reasoning, tools, vision, PDF'."""
|
||||
caps = []
|
||||
if self.reasoning:
|
||||
caps.append("reasoning")
|
||||
if self.tool_call:
|
||||
caps.append("tools")
|
||||
if self.supports_vision():
|
||||
caps.append("vision")
|
||||
if self.supports_pdf():
|
||||
caps.append("PDF")
|
||||
if self.supports_audio_input():
|
||||
caps.append("audio")
|
||||
if self.structured_output:
|
||||
caps.append("structured output")
|
||||
if self.open_weights:
|
||||
caps.append("open weights")
|
||||
return ", ".join(caps) if caps else "basic"
|
||||
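# Illustrative example (not part of the module): the field values below are
# made up purely to show how the formatting helpers behave.
def _example_model_info_formatting() -> None:
    sample = ModelInfo(
        id="example-model",
        name="Example Model",
        family="example",
        provider_id="anthropic",
        reasoning=True,
        tool_call=True,
        input_modalities=("text", "image"),
        context_window=200_000,
        max_output=8_192,
        cost_input=3.0,
        cost_output=15.0,
    )
    assert sample.format_cost() == "$3.00/M in, $15.00/M out"
    assert sample.format_capabilities() == "reasoning, tools, vision"
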
|
||||
|
||||
@dataclass
|
||||
class ProviderInfo:
|
||||
"""Full metadata for a provider from models.dev."""
|
||||
|
||||
id: str # models.dev provider ID
|
||||
name: str # display name
|
||||
env: Tuple[str, ...] # env var names for API key
|
||||
api: str # base URL
|
||||
doc: str = "" # documentation URL
|
||||
model_count: int = 0
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
# Provider ID mapping: Hermes ↔ models.dev
# ---------------------------------------------------------------------------

# Hermes provider names → models.dev provider IDs
PROVIDER_TO_MODELS_DEV: Dict[str, str] = {
|
||||
"openrouter": "openrouter",
|
||||
"anthropic": "anthropic",
|
||||
"openai": "openai",
|
||||
"openai-codex": "openai",
|
||||
"zai": "zai",
|
||||
"kimi-coding": "kimi-for-coding",
|
||||
"stepfun": "stepfun",
|
||||
"kimi-coding-cn": "kimi-for-coding",
|
||||
"minimax": "minimax",
|
||||
"minimax-oauth": "minimax",
|
||||
"minimax-cn": "minimax-cn",
|
||||
"deepseek": "deepseek",
|
||||
"alibaba": "alibaba",
|
||||
"qwen-oauth": "alibaba",
|
||||
"copilot": "github-copilot",
|
||||
"ai-gateway": "vercel",
|
||||
"opencode-zen": "opencode",
|
||||
"opencode-go": "opencode-go",
|
||||
"kilocode": "kilo",
|
||||
"fireworks": "fireworks-ai",
|
||||
"huggingface": "huggingface",
|
||||
"gemini": "google",
|
||||
"google": "google",
|
||||
"xai": "xai",
|
||||
"xiaomi": "xiaomi",
|
||||
"nvidia": "nvidia",
|
||||
"groq": "groq",
|
||||
"mistral": "mistral",
|
||||
"togetherai": "togetherai",
|
||||
"perplexity": "perplexity",
|
||||
"cohere": "cohere",
|
||||
"ollama-cloud": "ollama-cloud",
|
||||
}
|
||||
|
||||
# Reverse mapping: models.dev → Hermes (built lazily)
|
||||
_MODELS_DEV_TO_PROVIDER: Optional[Dict[str, str]] = None
|
||||
|
||||
|
||||
|
||||
def _get_cache_path() -> Path:
    """Return path to disk cache file."""
    try:
        from hermes_constants import get_hermes_home
        return get_hermes_home() / "models_dev_cache.json"
    except ImportError:
        env_val = os.environ.get("HERMES_HOME", "")
        hermes_home = Path(env_val) if env_val else Path.home() / ".hermes"
        return hermes_home / "models_dev_cache.json"

|
||||
|
||||
def _load_disk_cache() -> Dict[str, Any]:
|
||||
@@ -226,7 +95,7 @@ def fetch_models_dev(force_refresh: bool = False) -> Dict[str, Any]:
|
||||
response = requests.get(MODELS_DEV_URL, timeout=15)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
if isinstance(data, dict) and data:
|
||||
if isinstance(data, dict) and len(data) > 0:
|
||||
_models_dev_cache = data
|
||||
_models_dev_cache_time = time.time()
|
||||
_save_disk_cache(data)
|
||||
@@ -301,335 +170,3 @@ def _extract_context(entry: Dict[str, Any]) -> Optional[int]:
|
||||
if isinstance(ctx, (int, float)) and ctx > 0:
|
||||
return int(ctx)
|
||||
return None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Model capability metadata
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@dataclass
|
||||
class ModelCapabilities:
|
||||
"""Structured capability metadata for a model from models.dev."""
|
||||
|
||||
supports_tools: bool = True
|
||||
supports_vision: bool = False
|
||||
supports_reasoning: bool = False
|
||||
context_window: int = 200000
|
||||
max_output_tokens: int = 8192
|
||||
model_family: str = ""
|
||||
|
||||
|
||||
def _get_provider_models(provider: str) -> Optional[Dict[str, Any]]:
|
||||
"""Resolve a Hermes provider ID to its models dict from models.dev.
|
||||
|
||||
Returns the models dict or None if the provider is unknown or has no data.
|
||||
"""
|
||||
mdev_provider_id = PROVIDER_TO_MODELS_DEV.get(provider)
|
||||
if not mdev_provider_id:
|
||||
return None
|
||||
|
||||
data = fetch_models_dev()
|
||||
provider_data = data.get(mdev_provider_id)
|
||||
if not isinstance(provider_data, dict):
|
||||
return None
|
||||
|
||||
models = provider_data.get("models", {})
|
||||
if not isinstance(models, dict):
|
||||
return None
|
||||
|
||||
return models
|
||||
|
||||
|
||||
def _find_model_entry(models: Dict[str, Any], model: str) -> Optional[Dict[str, Any]]:
|
||||
"""Find a model entry by exact match, then case-insensitive fallback."""
|
||||
# Exact match
|
||||
entry = models.get(model)
|
||||
if isinstance(entry, dict):
|
||||
return entry
|
||||
|
||||
# Case-insensitive match
|
||||
model_lower = model.lower()
|
||||
for mid, mdata in models.items():
|
||||
if mid.lower() == model_lower and isinstance(mdata, dict):
|
||||
return mdata
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def get_model_capabilities(provider: str, model: str) -> Optional[ModelCapabilities]:
|
||||
"""Look up full capability metadata from models.dev cache.
|
||||
|
||||
Uses the existing fetch_models_dev() and PROVIDER_TO_MODELS_DEV mapping.
|
||||
Returns None if model not found.
|
||||
|
||||
Extracts from model entry fields:
|
||||
- reasoning (bool) → supports_reasoning
|
||||
- tool_call (bool) → supports_tools
|
||||
- attachment (bool) → supports_vision
|
||||
- limit.context (int) → context_window
|
||||
- limit.output (int) → max_output_tokens
|
||||
- family (str) → model_family
|
||||
"""
|
||||
models = _get_provider_models(provider)
|
||||
if models is None:
|
||||
return None
|
||||
|
||||
entry = _find_model_entry(models, model)
|
||||
if entry is None:
|
||||
return None
|
||||
|
||||
# Extract capability flags (default to False if missing)
|
||||
supports_tools = bool(entry.get("tool_call", False))
|
||||
# Vision: prefer explicit `modalities.input` when models.dev provides it.
|
||||
# The older `attachment` flag can be stale or too broad for image routing;
|
||||
# fall back to it only when the input modalities are absent/invalid.
|
||||
input_mods = entry.get("modalities", {})
|
||||
if isinstance(input_mods, dict):
|
||||
input_mods = input_mods.get("input")
|
||||
else:
|
||||
input_mods = None
|
||||
if isinstance(input_mods, list):
|
||||
supports_vision = "image" in input_mods
|
||||
else:
|
||||
supports_vision = bool(entry.get("attachment", False))
|
||||
supports_reasoning = bool(entry.get("reasoning", False))
|
||||
|
||||
# Extract limits
|
||||
limit = entry.get("limit", {})
|
||||
if not isinstance(limit, dict):
|
||||
limit = {}
|
||||
|
||||
ctx = limit.get("context")
|
||||
context_window = int(ctx) if isinstance(ctx, (int, float)) and ctx > 0 else 200000
|
||||
|
||||
out = limit.get("output")
|
||||
max_output_tokens = int(out) if isinstance(out, (int, float)) and out > 0 else 8192
|
||||
|
||||
model_family = entry.get("family", "") or ""
|
||||
|
||||
return ModelCapabilities(
|
||||
supports_tools=supports_tools,
|
||||
supports_vision=supports_vision,
|
||||
supports_reasoning=supports_reasoning,
|
||||
context_window=context_window,
|
||||
max_output_tokens=max_output_tokens,
|
||||
model_family=model_family,
|
||||
)
|
||||
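# Illustrative sketch (not part of the module): using the capability lookup to
# decide whether an image attachment can be routed to the active model. The
# provider/model names are examples only.
def _example_can_route_image(provider: str = "anthropic", model: str = "claude-opus-4") -> bool:
    caps = get_model_capabilities(provider, model)
    if caps is None:
        # Unknown model — fall back to a conservative default.
        return False
    return caps.supports_vision
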
|
||||
|
||||
def list_provider_models(provider: str) -> List[str]:
|
||||
"""Return all model IDs for a provider from models.dev.
|
||||
|
||||
Returns an empty list if the provider is unknown or has no data.
|
||||
"""
|
||||
from hermes_cli.models import normalize_provider
|
||||
provider = normalize_provider(provider) or provider
|
||||
|
||||
models = _get_provider_models(provider)
|
||||
if models is None:
|
||||
return []
|
||||
return [
|
||||
mid for mid in models.keys()
|
||||
if not _should_hide_from_provider_catalog(provider, mid)
|
||||
]
|
||||
|
||||
|
||||
# Patterns that indicate non-agentic or noise models (TTS, embedding,
|
||||
# dated preview snapshots, live/streaming-only, image-only).
|
||||
import re
|
||||
_NOISE_PATTERNS: re.Pattern = re.compile(
|
||||
r"-tts\b|embedding|live-|-(preview|exp)-\d{2,4}[-_]|"
|
||||
r"-image\b|-image-preview\b|-customtools\b",
|
||||
re.IGNORECASE,
|
||||
)
|
||||
|
||||
# Google's live Gemini catalogs currently include a mix of stale slugs and
|
||||
# Gemma models whose TPM quotas are too small for normal Hermes agent traffic.
|
||||
# Keep capability metadata available for direct/manual use, but hide these from
|
||||
# the Gemini model catalogs we surface in setup and model selection.
|
||||
_GOOGLE_HIDDEN_MODELS = frozenset({
|
||||
# Low-TPM Gemma models that trip Google input-token quota walls under
|
||||
# agent-style traffic despite advertising large context windows.
|
||||
"gemma-4-31b-it",
|
||||
"gemma-4-26b-it",
|
||||
"gemma-4-26b-a4b-it",
|
||||
"gemma-3-1b",
|
||||
"gemma-3-1b-it",
|
||||
"gemma-3-2b",
|
||||
"gemma-3-2b-it",
|
||||
"gemma-3-4b",
|
||||
"gemma-3-4b-it",
|
||||
"gemma-3-12b",
|
||||
"gemma-3-12b-it",
|
||||
"gemma-3-27b",
|
||||
"gemma-3-27b-it",
|
||||
# Stale/retired Google slugs that still surface through models.dev-backed
|
||||
# Gemini selection but 404 on the current Google endpoints.
|
||||
"gemini-1.5-flash",
|
||||
"gemini-1.5-pro",
|
||||
"gemini-1.5-flash-8b",
|
||||
"gemini-2.0-flash",
|
||||
"gemini-2.0-flash-lite",
|
||||
})
|
||||
|
||||
|
||||
def _should_hide_from_provider_catalog(provider: str, model_id: str) -> bool:
|
||||
provider_lower = (provider or "").strip().lower()
|
||||
model_lower = (model_id or "").strip().lower()
|
||||
if provider_lower in {"gemini", "google"} and model_lower in _GOOGLE_HIDDEN_MODELS:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def list_agentic_models(provider: str) -> List[str]:
|
||||
"""Return model IDs suitable for agentic use from models.dev.
|
||||
|
||||
Filters for tool_call=True and excludes noise (TTS, embedding,
|
||||
dated preview snapshots, live/streaming, image-only models).
|
||||
Returns an empty list on any failure.
|
||||
"""
|
||||
models = _get_provider_models(provider)
|
||||
if models is None:
|
||||
return []
|
||||
|
||||
result = []
|
||||
for mid, entry in models.items():
|
||||
if not isinstance(entry, dict):
|
||||
continue
|
||||
if _should_hide_from_provider_catalog(provider, mid):
|
||||
continue
|
||||
if not entry.get("tool_call", False):
|
||||
continue
|
||||
if _NOISE_PATTERNS.search(mid):
|
||||
continue
|
||||
result.append(mid)
|
||||
return result
|
||||
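# Illustrative usage (not part of the module): list_provider_models() returns the
# catalog minus hidden entries, while list_agentic_models() additionally keeps
# only tool-calling models and drops TTS/embedding/preview noise. Assumes the
# provider name passed in is already canonical.
def _example_catalog_listing(provider: str = "openrouter") -> None:
    all_models = list_provider_models(provider)
    agentic = list_agentic_models(provider)
    # Every agentic model is also in the full listing.
    assert set(agentic) <= set(all_models)
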
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Rich dataclass constructors — parse raw models.dev JSON into dataclasses
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _parse_model_info(model_id: str, raw: Dict[str, Any], provider_id: str) -> ModelInfo:
|
||||
"""Convert a raw models.dev model entry dict into a ModelInfo dataclass."""
|
||||
limit = raw.get("limit") or {}
|
||||
if not isinstance(limit, dict):
|
||||
limit = {}
|
||||
|
||||
cost = raw.get("cost") or {}
|
||||
if not isinstance(cost, dict):
|
||||
cost = {}
|
||||
|
||||
modalities = raw.get("modalities") or {}
|
||||
if not isinstance(modalities, dict):
|
||||
modalities = {}
|
||||
|
||||
input_mods = modalities.get("input") or []
|
||||
output_mods = modalities.get("output") or []
|
||||
|
||||
ctx = limit.get("context")
|
||||
ctx_int = int(ctx) if isinstance(ctx, (int, float)) and ctx > 0 else 0
|
||||
out = limit.get("output")
|
||||
out_int = int(out) if isinstance(out, (int, float)) and out > 0 else 0
|
||||
inp = limit.get("input")
|
||||
inp_int = int(inp) if isinstance(inp, (int, float)) and inp > 0 else None
|
||||
|
||||
return ModelInfo(
|
||||
id=model_id,
|
||||
name=raw.get("name", "") or model_id,
|
||||
family=raw.get("family", "") or "",
|
||||
provider_id=provider_id,
|
||||
reasoning=bool(raw.get("reasoning", False)),
|
||||
tool_call=bool(raw.get("tool_call", False)),
|
||||
attachment=bool(raw.get("attachment", False)),
|
||||
temperature=bool(raw.get("temperature", False)),
|
||||
structured_output=bool(raw.get("structured_output", False)),
|
||||
open_weights=bool(raw.get("open_weights", False)),
|
||||
input_modalities=tuple(input_mods) if isinstance(input_mods, list) else (),
|
||||
output_modalities=tuple(output_mods) if isinstance(output_mods, list) else (),
|
||||
context_window=ctx_int,
|
||||
max_output=out_int,
|
||||
max_input=inp_int,
|
||||
cost_input=float(cost.get("input", 0) or 0),
|
||||
cost_output=float(cost.get("output", 0) or 0),
|
||||
cost_cache_read=float(cost["cache_read"]) if "cache_read" in cost and cost["cache_read"] is not None else None,
|
||||
cost_cache_write=float(cost["cache_write"]) if "cache_write" in cost and cost["cache_write"] is not None else None,
|
||||
knowledge_cutoff=raw.get("knowledge", "") or "",
|
||||
release_date=raw.get("release_date", "") or "",
|
||||
status=raw.get("status", "") or "",
|
||||
interleaved=raw.get("interleaved", False),
|
||||
)
|
||||
|
||||
|
||||
def _parse_provider_info(provider_id: str, raw: Dict[str, Any]) -> ProviderInfo:
|
||||
"""Convert a raw models.dev provider entry dict into a ProviderInfo."""
|
||||
env = raw.get("env") or []
|
||||
models = raw.get("models") or {}
|
||||
return ProviderInfo(
|
||||
id=provider_id,
|
||||
name=raw.get("name", "") or provider_id,
|
||||
env=tuple(env) if isinstance(env, list) else (),
|
||||
api=raw.get("api", "") or "",
|
||||
doc=raw.get("doc", "") or "",
|
||||
model_count=len(models) if isinstance(models, dict) else 0,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Provider-level queries
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def get_provider_info(provider_id: str) -> Optional[ProviderInfo]:
|
||||
"""Get full provider metadata from models.dev.
|
||||
|
||||
Accepts either a Hermes provider ID (e.g. "kilocode") or a models.dev
|
||||
ID (e.g. "kilo"). Returns None if the provider is not in the catalog.
|
||||
"""
|
||||
# Resolve Hermes ID → models.dev ID
|
||||
mdev_id = PROVIDER_TO_MODELS_DEV.get(provider_id, provider_id)
|
||||
|
||||
data = fetch_models_dev()
|
||||
raw = data.get(mdev_id)
|
||||
if not isinstance(raw, dict):
|
||||
return None
|
||||
|
||||
return _parse_provider_info(mdev_id, raw)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Model-level queries (rich ModelInfo)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def get_model_info(
|
||||
provider_id: str, model_id: str
|
||||
) -> Optional[ModelInfo]:
|
||||
"""Get full model metadata from models.dev.
|
||||
|
||||
Accepts Hermes or models.dev provider ID. Tries exact match then
|
||||
case-insensitive fallback. Returns None if not found.
|
||||
"""
|
||||
mdev_id = PROVIDER_TO_MODELS_DEV.get(provider_id, provider_id)
|
||||
|
||||
data = fetch_models_dev()
|
||||
pdata = data.get(mdev_id)
|
||||
if not isinstance(pdata, dict):
|
||||
return None
|
||||
|
||||
models = pdata.get("models", {})
|
||||
if not isinstance(models, dict):
|
||||
return None
|
||||
|
||||
# Exact match
|
||||
raw = models.get(model_id)
|
||||
if isinstance(raw, dict):
|
||||
return _parse_model_info(model_id, raw, mdev_id)
|
||||
|
||||
# Case-insensitive fallback
|
||||
model_lower = model_id.lower()
|
||||
for mid, mdata in models.items():
|
||||
if mid.lower() == model_lower and isinstance(mdata, dict):
|
||||
return _parse_model_info(mid, mdata, mdev_id)
|
||||
|
||||
return None
|
||||
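# Illustrative usage (not part of the module): provider- and model-level lookups
# accept either Hermes provider IDs ("kilocode") or models.dev IDs ("kilo").
# The IDs below are examples; a real caller passes the active session's IDs.
def _example_describe_model(provider: str = "anthropic", model: str = "claude-opus-4") -> str:
    pinfo = get_provider_info(provider)
    minfo = get_model_info(provider, model)
    if pinfo is None or minfo is None:
        return "not in models.dev catalog"
    return (
        f"{minfo.name} via {pinfo.name} "
        f"({minfo.format_capabilities()}; {minfo.format_cost()}; "
        f"context {minfo.context_window:,} tokens)"
    )
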
|
||||
@@ -1,231 +0,0 @@
"""Helpers for translating OpenAI-style tool schemas to Moonshot's schema subset.

Moonshot (Kimi) accepts a stricter subset of JSON Schema than standard OpenAI
tool calling. Requests that violate it fail with HTTP 400:

    tools.function.parameters is not a valid moonshot flavored json schema,
    details: <...>

Known rejection modes documented at
https://forum.moonshot.ai/t/tool-calling-specification-violation-on-moonshot-api/102
and MoonshotAI/kimi-cli#1595:

1. Every property schema must carry a ``type``. Standard JSON Schema allows
   type to be omitted (the value is then unconstrained); Moonshot refuses.
2. When ``anyOf`` is used, ``type`` must be on the ``anyOf`` children, not
   the parent. Presence of both causes "type should be defined in anyOf
   items instead of the parent schema".

The ``#/definitions/...`` → ``#/$defs/...`` rewrite for draft-07 refs is
handled separately in ``tools/mcp_tool._normalize_mcp_input_schema`` so it
applies at MCP registration time for all providers.
"""

|
||||
from __future__ import annotations
|
||||
|
||||
import copy
|
||||
from typing import Any, Dict, List
|
||||
|
||||
# Keys whose values are maps of name → schema (not schemas themselves).
|
||||
# When we recurse, we walk the values of these maps as schemas, but we do
|
||||
# NOT apply the missing-type repair to the map itself.
|
||||
_SCHEMA_MAP_KEYS = frozenset({"properties", "patternProperties", "$defs", "definitions"})
|
||||
|
||||
# Keys whose values are lists of schemas.
|
||||
_SCHEMA_LIST_KEYS = frozenset({"anyOf", "oneOf", "allOf", "prefixItems"})
|
||||
|
||||
# Keys whose values are a single nested schema.
|
||||
_SCHEMA_NODE_KEYS = frozenset({"items", "contains", "not", "additionalProperties", "propertyNames"})
|
||||
|
||||
|
||||
def _repair_schema(node: Any, is_schema: bool = True) -> Any:
|
||||
"""Recursively apply Moonshot repairs to a schema node.
|
||||
|
||||
``is_schema=True`` means this dict is a JSON Schema node and gets the
|
||||
missing-type + anyOf-parent repairs applied. ``is_schema=False`` means
|
||||
it's a container map (e.g. the value of ``properties``) and we only
|
||||
recurse into its values.
|
||||
"""
|
||||
if isinstance(node, list):
|
||||
# Lists only show up under schema-list keys (anyOf/oneOf/allOf), so
|
||||
# every element is itself a schema.
|
||||
return [_repair_schema(item, is_schema=True) for item in node]
|
||||
if not isinstance(node, dict):
|
||||
return node
|
||||
|
||||
# Walk the dict, deciding per-key whether recursion is into a schema
|
||||
# node, a container map, or a scalar.
|
||||
repaired: Dict[str, Any] = {}
|
||||
for key, value in node.items():
|
||||
if key in _SCHEMA_MAP_KEYS and isinstance(value, dict):
|
||||
# Map of name → schema. Don't treat the map itself as a schema
|
||||
# (it has no type / properties of its own), but each value is.
|
||||
repaired[key] = {
|
||||
sub_key: _repair_schema(sub_val, is_schema=True)
|
||||
for sub_key, sub_val in value.items()
|
||||
}
|
||||
elif key in _SCHEMA_LIST_KEYS and isinstance(value, list):
|
||||
repaired[key] = [_repair_schema(v, is_schema=True) for v in value]
|
||||
elif key in _SCHEMA_NODE_KEYS:
|
||||
# items / not / additionalProperties: single nested schema.
|
||||
# additionalProperties can also be a bool — leave those alone.
|
||||
if isinstance(value, dict):
|
||||
repaired[key] = _repair_schema(value, is_schema=True)
|
||||
else:
|
||||
repaired[key] = value
|
||||
else:
|
||||
# Scalars (description, title, format, enum values, etc.) pass through.
|
||||
repaired[key] = value
|
||||
|
||||
if not is_schema:
|
||||
return repaired
|
||||
|
||||
# Rule 2: when anyOf is present, type belongs only on the children.
|
||||
# Additionally, Moonshot rejects null-type branches inside anyOf
|
||||
# (enum value (<nil>) does not match any type in [string]).
|
||||
# Collapse the anyOf to the first non-null branch and infer its type.
|
||||
if "anyOf" in repaired and isinstance(repaired["anyOf"], list):
|
||||
repaired.pop("type", None)
|
||||
non_null = [b for b in repaired["anyOf"]
|
||||
if isinstance(b, dict) and b.get("type") != "null"]
|
||||
if non_null and len(non_null) < len(repaired["anyOf"]):
|
||||
# Drop the anyOf wrapper — keep only the non-null branch.
|
||||
# If there's a single non-null branch, promote it and fall
|
||||
# through to Rules 1/3 so nullable/enum cleanup still applies
|
||||
# to the merged node.
|
||||
if len(non_null) == 1:
|
||||
merge = {k: v for k, v in repaired.items() if k != "anyOf"}
|
||||
merge.update(non_null[0])
|
||||
repaired = merge
|
||||
else:
|
||||
repaired["anyOf"] = non_null
|
||||
return repaired
|
||||
else:
|
||||
# Nothing to collapse — parent type stripped, children already
|
||||
# repaired by the recursive walk above.
|
||||
return repaired
|
||||
|
||||
# Moonshot also rejects non-standard keywords like ``nullable`` on
|
||||
# parameter schemas — strip it.
|
||||
repaired.pop("nullable", None)
|
||||
|
||||
# Rule 1: property schemas without type need one. $ref nodes are exempt
|
||||
# — their type comes from the referenced definition.
|
||||
# Fill missing type BEFORE Rule 3 so enum cleanup can check the type.
|
||||
if "$ref" not in repaired:
|
||||
repaired = _fill_missing_type(repaired)
|
||||
|
||||
# Rule 3: Moonshot rejects null/empty-string values inside enum arrays
|
||||
# when the parent type is a scalar (string, integer, etc.). The error:
|
||||
# "enum value (<nil>) does not match any type in [string]"
|
||||
# Strip null and empty-string from enum values, and if the enum becomes
|
||||
# empty, drop it entirely.
|
||||
if "enum" in repaired and isinstance(repaired["enum"], list):
|
||||
node_type = repaired.get("type")
|
||||
if node_type in ("string", "integer", "number", "boolean"):
|
||||
cleaned = [v for v in repaired["enum"]
|
||||
if v is not None and v != ""]
|
||||
if cleaned:
|
||||
repaired["enum"] = cleaned
|
||||
else:
|
||||
repaired.pop("enum")
|
||||
|
||||
return repaired
|
||||
|
||||
|
||||
def _fill_missing_type(node: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Infer a reasonable ``type`` if this schema node has none."""
|
||||
if "type" in node and node["type"] not in (None, ""):
|
||||
return node
|
||||
|
||||
# Heuristic: presence of ``properties`` → object, ``items`` → array, ``enum``
|
||||
# → type of first enum value, else fall back to ``string`` (safest scalar).
|
||||
if "properties" in node or "required" in node or "additionalProperties" in node:
|
||||
inferred = "object"
|
||||
elif "items" in node or "prefixItems" in node:
|
||||
inferred = "array"
|
||||
elif "enum" in node and isinstance(node["enum"], list) and node["enum"]:
|
||||
sample = node["enum"][0]
|
||||
if isinstance(sample, bool):
|
||||
inferred = "boolean"
|
||||
elif isinstance(sample, int):
|
||||
inferred = "integer"
|
||||
elif isinstance(sample, float):
|
||||
inferred = "number"
|
||||
else:
|
||||
inferred = "string"
|
||||
else:
|
||||
inferred = "string"
|
||||
|
||||
return {**node, "type": inferred}
|
||||
|
||||
|
||||
def sanitize_moonshot_tool_parameters(parameters: Any) -> Dict[str, Any]:
|
||||
"""Normalize tool parameters to a Moonshot-compatible object schema.
|
||||
|
||||
Returns a deep-copied schema with the two flavored-JSON-Schema repairs
|
||||
applied. Input is not mutated.
|
||||
"""
|
||||
if not isinstance(parameters, dict):
|
||||
return {"type": "object", "properties": {}}
|
||||
|
||||
repaired = _repair_schema(copy.deepcopy(parameters), is_schema=True)
|
||||
if not isinstance(repaired, dict):
|
||||
return {"type": "object", "properties": {}}
|
||||
|
||||
# Top-level must be an object schema
|
||||
if repaired.get("type") != "object":
|
||||
repaired["type"] = "object"
|
||||
if "properties" not in repaired:
|
||||
repaired["properties"] = {}
|
||||
|
||||
return repaired
|
||||
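# Illustrative example (not part of the module): a schema Moonshot's flavored
# validator rejects, and what the sanitizer turns it into. The property names
# are made up.
def _example_sanitize() -> None:
    rejected = {
        "type": "object",
        "properties": {
            # Rule 1 violation: no "type" on the property.
            "query": {"description": "Search query"},
            # Rule 2 violation: "type" alongside "anyOf", plus a null branch.
            "limit": {"type": "integer", "anyOf": [{"type": "integer"}, {"type": "null"}]},
        },
    }
    repaired = sanitize_moonshot_tool_parameters(rejected)
    assert repaired["properties"]["query"]["type"] == "string"      # inferred scalar type
    assert repaired["properties"]["limit"] == {"type": "integer"}   # anyOf collapsed, parent type dropped
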
|
||||
|
||||
def sanitize_moonshot_tools(tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
||||
"""Apply ``sanitize_moonshot_tool_parameters`` to every tool's parameters."""
|
||||
if not tools:
|
||||
return tools
|
||||
|
||||
sanitized: List[Dict[str, Any]] = []
|
||||
any_change = False
|
||||
for tool in tools:
|
||||
if not isinstance(tool, dict):
|
||||
sanitized.append(tool)
|
||||
continue
|
||||
fn = tool.get("function")
|
||||
if not isinstance(fn, dict):
|
||||
sanitized.append(tool)
|
||||
continue
|
||||
params = fn.get("parameters")
|
||||
repaired = sanitize_moonshot_tool_parameters(params)
|
||||
if repaired is not params:
|
||||
any_change = True
|
||||
new_fn = {**fn, "parameters": repaired}
|
||||
sanitized.append({**tool, "function": new_fn})
|
||||
else:
|
||||
sanitized.append(tool)
|
||||
|
||||
return sanitized if any_change else tools
|
||||
|
||||
|
||||
def is_moonshot_model(model: str | None) -> bool:
|
||||
"""True for any Kimi / Moonshot model slug, regardless of aggregator prefix.
|
||||
|
||||
Matches bare names (``kimi-k2.6``, ``moonshotai/Kimi-K2.6``) and aggregator-
|
||||
prefixed slugs (``nous/moonshotai/kimi-k2.6``, ``openrouter/moonshotai/...``).
|
||||
Detection by model name covers Nous / OpenRouter / other aggregators that
|
||||
route to Moonshot's inference, where the base URL is the aggregator's, not
|
||||
``api.moonshot.ai``.
|
||||
"""
|
||||
if not model:
|
||||
return False
|
||||
bare = model.strip().lower()
|
||||
# Last path segment (covers aggregator-prefixed slugs)
|
||||
tail = bare.rsplit("/", 1)[-1]
|
||||
if tail.startswith("kimi-") or tail == "kimi":
|
||||
return True
|
||||
# Vendor-prefixed forms commonly used on aggregators
|
||||
if "moonshot" in bare or "/kimi" in bare or bare.startswith("kimi"):
|
||||
return True
|
||||
return False
|
||||
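# Illustrative call-site sketch (not part of the module): the request path would
# gate the sanitizer on the model slug so other providers' schemas pass through
# untouched.
def _example_prepare_tools(model: str, tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    if is_moonshot_model(model):
        return sanitize_moonshot_tools(tools)
    return tools
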
@@ -1,325 +0,0 @@
"""Cross-session rate limit guard for Nous Portal.

Writes rate limit state to a shared file so all sessions (CLI, gateway,
cron, auxiliary) can check whether Nous Portal is currently rate-limited
before making requests. Prevents retry amplification when RPH is tapped.

Each 429 from Nous triggers up to 9 API calls per conversation turn
(3 SDK retries x 3 Hermes retries), and every one of those calls counts
against RPH. By recording the rate limit state on first 429 and checking
it before subsequent attempts, we eliminate the amplification effect.
"""

from __future__ import annotations

import json
import logging
import os
import tempfile
import time
from typing import Any, Mapping, Optional

from utils import atomic_replace

logger = logging.getLogger(__name__)

_STATE_SUBDIR = "rate_limits"
_STATE_FILENAME = "nous.json"

|
||||
|
||||
def _state_path() -> str:
|
||||
"""Return the path to the Nous rate limit state file."""
|
||||
try:
|
||||
from hermes_constants import get_hermes_home
|
||||
base = get_hermes_home()
|
||||
except ImportError:
|
||||
base = os.path.join(os.path.expanduser("~"), ".hermes")
|
||||
return os.path.join(base, _STATE_SUBDIR, _STATE_FILENAME)
|
||||
|
||||
|
||||
def _parse_reset_seconds(headers: Optional[Mapping[str, str]]) -> Optional[float]:
|
||||
"""Extract the best available reset-time estimate from response headers.
|
||||
|
||||
Priority:
|
||||
1. x-ratelimit-reset-requests-1h (hourly RPH window — most useful)
|
||||
2. x-ratelimit-reset-requests (per-minute RPM window)
|
||||
3. retry-after (generic HTTP header)
|
||||
|
||||
Returns seconds-from-now, or None if no usable header found.
|
||||
"""
|
||||
if not headers:
|
||||
return None
|
||||
|
||||
lowered = {k.lower(): v for k, v in headers.items()}
|
||||
|
||||
for key in (
|
||||
"x-ratelimit-reset-requests-1h",
|
||||
"x-ratelimit-reset-requests",
|
||||
"retry-after",
|
||||
):
|
||||
raw = lowered.get(key)
|
||||
if raw is not None:
|
||||
try:
|
||||
val = float(raw)
|
||||
if val > 0:
|
||||
return val
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def record_nous_rate_limit(
|
||||
*,
|
||||
headers: Optional[Mapping[str, str]] = None,
|
||||
error_context: Optional[dict[str, Any]] = None,
|
||||
default_cooldown: float = 300.0,
|
||||
) -> None:
|
||||
"""Record that Nous Portal is rate-limited.
|
||||
|
||||
Parses the reset time from response headers or error context.
|
||||
Falls back to ``default_cooldown`` (5 minutes) if no reset info
|
||||
is available. Writes to a shared file that all sessions can read.
|
||||
|
||||
Args:
|
||||
headers: HTTP response headers from the 429 error.
|
||||
error_context: Structured error context from _extract_api_error_context().
|
||||
default_cooldown: Fallback cooldown in seconds when no header data.
|
||||
"""
|
||||
now = time.time()
|
||||
reset_at = None
|
||||
|
||||
# Try headers first (most accurate)
|
||||
header_seconds = _parse_reset_seconds(headers)
|
||||
if header_seconds is not None:
|
||||
reset_at = now + header_seconds
|
||||
|
||||
# Try error_context reset_at (from body parsing)
|
||||
if reset_at is None and isinstance(error_context, dict):
|
||||
ctx_reset = error_context.get("reset_at")
|
||||
if isinstance(ctx_reset, (int, float)) and ctx_reset > now:
|
||||
reset_at = float(ctx_reset)
|
||||
|
||||
# Default cooldown
|
||||
if reset_at is None:
|
||||
reset_at = now + default_cooldown
|
||||
|
||||
path = _state_path()
|
||||
try:
|
||||
state_dir = os.path.dirname(path)
|
||||
os.makedirs(state_dir, exist_ok=True)
|
||||
|
||||
state = {
|
||||
"reset_at": reset_at,
|
||||
"recorded_at": now,
|
||||
"reset_seconds": reset_at - now,
|
||||
}
|
||||
|
||||
# Atomic write: write to temp file + rename
|
||||
fd, tmp_path = tempfile.mkstemp(dir=state_dir, suffix=".tmp")
|
||||
try:
|
||||
with os.fdopen(fd, "w") as f:
|
||||
json.dump(state, f)
|
||||
atomic_replace(tmp_path, path)
|
||||
except Exception:
|
||||
# Clean up temp file on failure
|
||||
try:
|
||||
os.unlink(tmp_path)
|
||||
except OSError:
|
||||
pass
|
||||
raise
|
||||
|
||||
logger.info(
|
||||
"Nous rate limit recorded: resets in %.0fs (at %.0f)",
|
||||
reset_at - now, reset_at,
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.debug("Failed to write Nous rate limit state: %s", exc)
|
||||
|
||||
|
||||
def nous_rate_limit_remaining() -> Optional[float]:
|
||||
"""Check if Nous Portal is currently rate-limited.
|
||||
|
||||
Returns:
|
||||
Seconds remaining until reset, or None if not rate-limited.
|
||||
"""
|
||||
path = _state_path()
|
||||
try:
|
||||
with open(path) as f:
|
||||
state = json.load(f)
|
||||
reset_at = state.get("reset_at", 0)
|
||||
remaining = reset_at - time.time()
|
||||
if remaining > 0:
|
||||
return remaining
|
||||
# Expired — clean up
|
||||
try:
|
||||
os.unlink(path)
|
||||
except OSError:
|
||||
pass
|
||||
return None
|
||||
except (FileNotFoundError, json.JSONDecodeError, KeyError, TypeError):
|
||||
return None
|
||||
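# Illustrative call-site sketch (not part of the module): check the shared state
# before spending a request, record the window on a fresh 429, and clear it on
# success. ``send_request`` and the response attributes are placeholders for the
# caller's own HTTP client.
def _example_guarded_request(send_request) -> Optional[Any]:
    remaining = nous_rate_limit_remaining()
    if remaining is not None:
        logger.info("Nous rate-limited for another %s — skipping", format_remaining(remaining))
        return None
    response = send_request()
    if getattr(response, "status_code", None) == 429:
        record_nous_rate_limit(headers=getattr(response, "headers", None))
        return None
    clear_nous_rate_limit()
    return response
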
|
||||
|
||||
def clear_nous_rate_limit() -> None:
|
||||
"""Clear the rate limit state (e.g., after a successful Nous request)."""
|
||||
try:
|
||||
os.unlink(_state_path())
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
except OSError as exc:
|
||||
logger.debug("Failed to clear Nous rate limit state: %s", exc)
|
||||
|
||||
|
||||
def format_remaining(seconds: float) -> str:
|
||||
"""Format seconds remaining into human-readable duration."""
|
||||
s = max(0, int(seconds))
|
||||
if s < 60:
|
||||
return f"{s}s"
|
||||
if s < 3600:
|
||||
m, sec = divmod(s, 60)
|
||||
return f"{m}m {sec}s" if sec else f"{m}m"
|
||||
h, remainder = divmod(s, 3600)
|
||||
m = remainder // 60
|
||||
return f"{h}h {m}m" if m else f"{h}h"
|
||||
|
||||
|
||||
# Buckets with reset windows shorter than this are treated as transient
|
||||
# (upstream jitter, secondary throttling) rather than a genuine quota
|
||||
# exhaustion worth a cross-session breaker trip.
|
||||
_MIN_RESET_FOR_BREAKER_SECONDS = 60.0
|
||||
|
||||
|
||||
def is_genuine_nous_rate_limit(
|
||||
*,
|
||||
headers: Optional[Mapping[str, str]] = None,
|
||||
last_known_state: Optional[Any] = None,
|
||||
) -> bool:
|
||||
"""Decide whether a 429 from Nous Portal is a real account rate limit.
|
||||
|
||||
Nous Portal multiplexes multiple upstream providers (DeepSeek, Kimi,
|
||||
MiMo, Hermes, ...) behind one endpoint. A 429 can mean either:
|
||||
|
||||
(a) The caller's own RPM / RPH / TPM / TPH bucket on Nous is
|
||||
exhausted — a genuine rate limit that will last until the
|
||||
bucket resets.
|
||||
(b) The upstream provider is out of capacity for a specific model
|
||||
— transient, clears in seconds, and has nothing to do with
|
||||
the caller's quota on Nous.
|
||||
|
||||
Tripping the cross-session breaker on (b) blocks ALL Nous requests
|
||||
(and all models, since Nous is one provider key) for minutes even
|
||||
though the caller's account is healthy and a different model would
|
||||
have worked. That's the bug users hit when DeepSeek V4 Pro 429s
|
||||
trigger a breaker that then blocks Kimi 2.6 and MiMo V2.5 Pro.
|
||||
|
||||
We tell the two apart by looking at:
|
||||
|
||||
1. The 429 response's own ``x-ratelimit-*`` headers. Nous emits
|
||||
the full suite on every response including 429s. An exhausted
|
||||
bucket (``remaining == 0`` with a reset window >= 60s) is
|
||||
proof of (a).
|
||||
2. The last-known-good rate-limit state captured by
|
||||
``_capture_rate_limits()`` on the previous successful
|
||||
response. If any bucket there was already near-exhausted with
|
||||
a substantial reset window, the current 429 is almost
|
||||
certainly (a) continuing from that condition.
|
||||
|
||||
If neither signal fires, we treat the 429 as (b): fail the single
|
||||
request, let the retry loop or model-switch proceed, and do NOT
|
||||
write the cross-session breaker file.
|
||||
|
||||
Returns True when the evidence points at (a).
|
||||
"""
|
||||
# Signal 1: current 429 response headers.
|
||||
state = _parse_buckets_from_headers(headers)
|
||||
if _has_exhausted_bucket(state):
|
||||
return True
|
||||
|
||||
# Signal 2: last-known-good state from a recent successful response.
|
||||
# Accepts either a RateLimitState (dataclass from rate_limit_tracker)
|
||||
# or a dict of bucket snapshots.
|
||||
if last_known_state is not None and _has_exhausted_bucket_in_object(last_known_state):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def _parse_buckets_from_headers(
|
||||
headers: Optional[Mapping[str, str]],
|
||||
) -> dict[str, tuple[Optional[int], Optional[float]]]:
|
||||
"""Extract (remaining, reset_seconds) per bucket from x-ratelimit-* headers.
|
||||
|
||||
Returns empty dict when no rate-limit headers are present.
|
||||
"""
|
||||
if not headers:
|
||||
return {}
|
||||
|
||||
lowered = {k.lower(): v for k, v in headers.items()}
|
||||
if not any(k.startswith("x-ratelimit-") for k in lowered):
|
||||
return {}
|
||||
|
||||
def _maybe_int(raw: Optional[str]) -> Optional[int]:
|
||||
if raw is None:
|
||||
return None
|
||||
try:
|
||||
return int(float(raw))
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
|
||||
def _maybe_float(raw: Optional[str]) -> Optional[float]:
|
||||
if raw is None:
|
||||
return None
|
||||
try:
|
||||
return float(raw)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
|
||||
result: dict[str, tuple[Optional[int], Optional[float]]] = {}
|
||||
for tag in ("requests", "requests-1h", "tokens", "tokens-1h"):
|
||||
remaining = _maybe_int(lowered.get(f"x-ratelimit-remaining-{tag}"))
|
||||
reset = _maybe_float(lowered.get(f"x-ratelimit-reset-{tag}"))
|
||||
if remaining is not None or reset is not None:
|
||||
result[tag] = (remaining, reset)
|
||||
return result
|
||||
|
||||
|
||||
def _has_exhausted_bucket(
|
||||
buckets: Mapping[str, tuple[Optional[int], Optional[float]]],
|
||||
) -> bool:
|
||||
"""Return True when any bucket has remaining == 0 AND a meaningful reset window."""
|
||||
for remaining, reset in buckets.values():
|
||||
if remaining is None or remaining > 0:
|
||||
continue
|
||||
if reset is None:
|
||||
continue
|
||||
if reset >= _MIN_RESET_FOR_BREAKER_SECONDS:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _has_exhausted_bucket_in_object(state: Any) -> bool:
|
||||
"""Check a RateLimitState-like object for an exhausted bucket.
|
||||
|
||||
Accepts the dataclass from ``agent.rate_limit_tracker`` (buckets
|
||||
exposed as attributes ``requests_min``, ``requests_hour``,
|
||||
``tokens_min``, ``tokens_hour``) and falls back gracefully for any
|
||||
object missing those attributes.
|
||||
"""
|
||||
for attr in ("requests_min", "requests_hour", "tokens_min", "tokens_hour"):
|
||||
bucket = getattr(state, attr, None)
|
||||
if bucket is None:
|
||||
continue
|
||||
limit = getattr(bucket, "limit", 0) or 0
|
||||
remaining = getattr(bucket, "remaining", 0) or 0
|
||||
# Prefer the adjusted "remaining_seconds_now" property when present;
|
||||
# fall back to raw reset_seconds.
|
||||
reset = getattr(bucket, "remaining_seconds_now", None)
|
||||
if reset is None:
|
||||
reset = getattr(bucket, "reset_seconds", 0.0) or 0.0
|
||||
if limit <= 0:
|
||||
continue
|
||||
if remaining > 0:
|
||||
continue
|
||||
if reset >= _MIN_RESET_FOR_BREAKER_SECONDS:
|
||||
return True
|
||||
return False
|
||||
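# Illustrative example (not part of the module): a 429 whose own headers show an
# exhausted hourly bucket trips the breaker; a capacity blip with a healthy
# bucket does not. Header values are made up.
def _example_breaker_decision() -> None:
    exhausted = {
        "x-ratelimit-remaining-requests-1h": "0",
        "x-ratelimit-reset-requests-1h": "1800",   # 30 min until the RPH window resets
    }
    transient = {
        "x-ratelimit-remaining-requests-1h": "42",
        "x-ratelimit-reset-requests-1h": "1800",
    }
    assert is_genuine_nous_rate_limit(headers=exhausted) is True
    assert is_genuine_nous_rate_limit(headers=transient) is False
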
@@ -1,193 +0,0 @@
"""
Contextual first-touch onboarding hints.

Instead of blocking first-run questionnaires, show a one-time hint the *first*
time a user hits a behavior fork — message-while-running, first long-running
tool, etc. Each hint is shown once per install (tracked in ``config.yaml`` under
``onboarding.seen.<flag>``) and then never again.

Keep this module tiny and dependency-free so both the CLI and gateway can import
it without pulling in heavy modules.
"""

from __future__ import annotations

import logging
from pathlib import Path
from typing import Any, Mapping, Optional

logger = logging.getLogger(__name__)

|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Flag names (stable — used as config.yaml keys under onboarding.seen)
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
BUSY_INPUT_FLAG = "busy_input_prompt"
|
||||
TOOL_PROGRESS_FLAG = "tool_progress_prompt"
|
||||
OPENCLAW_RESIDUE_FLAG = "openclaw_residue_cleanup"
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Hint content
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
def busy_input_hint_gateway(mode: str) -> str:
|
||||
"""Hint shown the first time a user messages while the agent is busy.
|
||||
|
||||
``mode`` is the effective busy_input_mode that was just applied, so the
|
||||
message matches reality ("I just interrupted…" vs "I just queued…").
|
||||
"""
|
||||
if mode == "queue":
|
||||
return (
|
||||
"💡 First-time tip — I queued your message instead of interrupting. "
|
||||
"Send `/busy interrupt` to make new messages stop the current task "
|
||||
"immediately, or `/busy status` to check. This notice won't appear again."
|
||||
)
|
||||
if mode == "steer":
|
||||
return (
|
||||
"💡 First-time tip — I steered your message into the current run; "
|
||||
"it will arrive after the next tool call instead of interrupting. "
|
||||
"Send `/busy interrupt` or `/busy queue` to change this, or "
|
||||
"`/busy status` to check. This notice won't appear again."
|
||||
)
|
||||
return (
|
||||
"💡 First-time tip — I just interrupted my current task to answer you. "
|
||||
"Send `/busy queue` to queue follow-ups for after the current task instead, "
|
||||
"`/busy steer` to inject them mid-run without interrupting, or "
|
||||
"`/busy status` to check. This notice won't appear again."
|
||||
)
|
||||
|
||||
|
||||
def busy_input_hint_cli(mode: str) -> str:
|
||||
"""CLI version of the busy-input hint (plain text, no markdown)."""
|
||||
if mode == "queue":
|
||||
return (
|
||||
"(tip) Your message was queued for the next turn. "
|
||||
"Use /busy interrupt to make Enter stop the current run instead, "
|
||||
"or /busy steer to inject mid-run. This tip only shows once."
|
||||
)
|
||||
if mode == "steer":
|
||||
return (
|
||||
"(tip) Your message was steered into the current run; it arrives "
|
||||
"after the next tool call. Use /busy interrupt or /busy queue to "
|
||||
"change this. This tip only shows once."
|
||||
)
|
||||
return (
|
||||
"(tip) Your message interrupted the current run. "
|
||||
"Use /busy queue to queue messages for the next turn instead, "
|
||||
"or /busy steer to inject mid-run. This tip only shows once."
|
||||
)
|
||||
|
||||
|
||||
def tool_progress_hint_gateway() -> str:
|
||||
return (
|
||||
"💡 First-time tip — that tool took a while and I'm streaming every step. "
|
||||
"If the progress messages feel noisy, send `/verbose` to cycle modes "
|
||||
"(all → new → off). This notice won't appear again."
|
||||
)
|
||||
|
||||
|
||||
def tool_progress_hint_cli() -> str:
|
||||
return (
|
||||
"(tip) That tool ran for a while. Use /verbose to cycle tool-progress "
|
||||
"display modes (all -> new -> off -> verbose). This tip only shows once."
|
||||
)
|
||||
|
||||
|
||||
def openclaw_residue_hint_cli() -> str:
|
||||
"""Banner shown the first time Hermes starts and finds ``~/.openclaw/``.
|
||||
|
||||
Points users at ``hermes claw migrate`` (non-destructive port of config,
|
||||
memory, and skills) first. ``hermes claw cleanup`` is mentioned as the
|
||||
follow-up step for users who have already migrated and want to archive
|
||||
the old directory — with a warning that archiving breaks OpenClaw.
|
||||
"""
|
||||
return (
|
||||
"A legacy OpenClaw directory was detected at ~/.openclaw/.\n"
|
||||
"To port your config, memory, and skills over to Hermes, run "
|
||||
"`hermes claw migrate`.\n"
|
||||
"If you've already migrated and want to archive the old directory, "
|
||||
"run `hermes claw cleanup` (renames it to ~/.openclaw.pre-migration — "
|
||||
"OpenClaw will stop working after this).\n"
|
||||
"This tip only shows once."
|
||||
)
|
||||
|
||||
|
||||
def detect_openclaw_residue(home: Optional[Path] = None) -> bool:
|
||||
"""Return True if an OpenClaw workspace directory is present in ``$HOME``.
|
||||
|
||||
Pure filesystem check — no side effects. ``home`` override exists for tests.
|
||||
"""
|
||||
base = home or Path.home()
|
||||
try:
|
||||
return (base / ".openclaw").is_dir()
|
||||
except OSError:
|
||||
return False
|
||||
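# Illustrative startup sketch (not part of the module): the CLI would pair the
# filesystem check with the seen-flag so the banner appears at most once.
# ``config`` / ``config_path`` stand in for the caller's loaded config mapping
# and its path on disk.
def _example_maybe_show_openclaw_banner(config: Mapping[str, Any], config_path: Path) -> Optional[str]:
    if detect_openclaw_residue() and not is_seen(config, OPENCLAW_RESIDUE_FLAG):
        mark_seen(config_path, OPENCLAW_RESIDUE_FLAG)
        return openclaw_residue_hint_cli()
    return None
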
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# State read / write
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
def _get_seen_dict(config: Mapping[str, Any]) -> Mapping[str, Any]:
|
||||
onboarding = config.get("onboarding") if isinstance(config, Mapping) else None
|
||||
if not isinstance(onboarding, Mapping):
|
||||
return {}
|
||||
seen = onboarding.get("seen")
|
||||
return seen if isinstance(seen, Mapping) else {}
|
||||
|
||||
|
||||
def is_seen(config: Mapping[str, Any], flag: str) -> bool:
|
||||
"""Return True if the user has already been shown this first-touch hint."""
|
||||
return bool(_get_seen_dict(config).get(flag))
|
||||
|
||||
|
||||
def mark_seen(config_path: Path, flag: str) -> bool:
|
||||
"""Persist ``onboarding.seen.<flag> = True`` to ``config_path``.
|
||||
|
||||
Uses the atomic YAML writer so a concurrent process can't observe a
|
||||
partially-written file. Returns True on success, False on any error
|
||||
(including the config file being absent — onboarding is best-effort).
|
||||
"""
|
||||
try:
|
||||
import yaml
|
||||
from utils import atomic_yaml_write
|
||||
except Exception as e: # pragma: no cover — dependency issue
|
||||
logger.debug("onboarding: failed to import yaml/utils: %s", e)
|
||||
return False
|
||||
|
||||
try:
|
||||
cfg: dict = {}
|
||||
if config_path.exists():
|
||||
with open(config_path, encoding="utf-8") as f:
|
||||
cfg = yaml.safe_load(f) or {}
|
||||
if not isinstance(cfg.get("onboarding"), dict):
|
||||
cfg["onboarding"] = {}
|
||||
seen = cfg["onboarding"].get("seen")
|
||||
if not isinstance(seen, dict):
|
||||
seen = {}
|
||||
cfg["onboarding"]["seen"] = seen
|
||||
if seen.get(flag) is True:
|
||||
return True # already marked — nothing to do
|
||||
seen[flag] = True
|
||||
atomic_yaml_write(config_path, cfg)
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.debug("onboarding: failed to mark flag %s: %s", flag, e)
|
||||
return False
|
||||
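# Illustrative first-touch flow (not part of the module), shown for the
# busy-input hint. After mark_seen() the config.yaml carries:
#
#   onboarding:
#     seen:
#       busy_input_prompt: true
#
def _example_first_busy_message(config: Mapping[str, Any], config_path: Path, mode: str) -> Optional[str]:
    if is_seen(config, BUSY_INPUT_FLAG):
        return None
    mark_seen(config_path, BUSY_INPUT_FLAG)
    return busy_input_hint_cli(mode)
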
|
||||
|
||||
__all__ = [
|
||||
"BUSY_INPUT_FLAG",
|
||||
"TOOL_PROGRESS_FLAG",
|
||||
"OPENCLAW_RESIDUE_FLAG",
|
||||
"busy_input_hint_gateway",
|
||||
"busy_input_hint_cli",
|
||||
"tool_progress_hint_gateway",
|
||||
"tool_progress_hint_cli",
|
||||
"openclaw_residue_hint_cli",
|
||||
"detect_openclaw_residue",
|
||||
"is_seen",
|
||||
"mark_seen",
|
||||
]
|
||||
@@ -12,7 +12,7 @@ import threading
|
||||
from collections import OrderedDict
|
||||
from pathlib import Path
|
||||
|
||||
from hermes_constants import get_hermes_home, get_skills_dir, is_wsl
|
||||
from hermes_constants import get_hermes_home
|
||||
from typing import Optional
|
||||
|
||||
from agent.skill_utils import (
|
||||
@@ -40,7 +40,7 @@ _CONTEXT_THREAT_PATTERNS = [
|
||||
(r'disregard\s+(your|all|any)\s+(instructions|rules|guidelines)', "disregard_rules"),
|
||||
(r'act\s+as\s+(if|though)\s+you\s+(have\s+no|don\'t\s+have)\s+(restrictions|limits|rules)', "bypass_restrictions"),
|
||||
(r'<!--[^>]*(?:ignore|override|system|secret|hidden)[^>]*-->', "html_comment_injection"),
|
||||
(r'<\s*div\s+style\s*=\s*["\'][\s\S]*?display\s*:\s*none', "hidden_div"),
|
||||
(r'<\s*div\s+style\s*=\s*["\'].*display\s*:\s*none', "hidden_div"),
|
||||
(r'translate\s+.*\s+into\s+.*\s+and\s+(execute|run|eval)', "translate_execute"),
|
||||
(r'curl\s+[^\n]*\$\{?\w*(KEY|TOKEN|SECRET|PASSWORD|CREDENTIAL|API)', "exfil_curl"),
|
||||
(r'cat\s+[^\n]*(\.env|credentials|\.netrc|\.pgpass)', "read_secrets"),
|
||||
@@ -141,12 +141,6 @@ DEFAULT_AGENT_IDENTITY = (
|
||||
"Be targeted and efficient in your exploration and investigations."
|
||||
)
|
||||
|
||||
HERMES_AGENT_HELP_GUIDANCE = (
|
||||
"If the user asks about configuring, setting up, or using Hermes Agent "
|
||||
"itself, load the `hermes-agent` skill with skill_view(name='hermes-agent') "
|
||||
"before answering. Docs: https://hermes-agent.nousresearch.com/docs"
|
||||
)
|
||||
|
||||
MEMORY_GUIDANCE = (
|
||||
"You have persistent memory across sessions. Save durable facts using the memory "
|
||||
"tool: user preferences, environment details, tool quirks, and stable conventions. "
|
||||
@@ -158,13 +152,7 @@ MEMORY_GUIDANCE = (
|
||||
"Do NOT save task progress, session outcomes, completed-work logs, or temporary TODO "
|
||||
"state to memory; use session_search to recall those from past transcripts. "
|
||||
"If you've discovered a new way to do something, solved a problem that could be "
|
||||
"necessary later, save it as a skill with the skill tool.\n"
|
||||
"Write memories as declarative facts, not instructions to yourself. "
|
||||
"'User prefers concise responses' ✓ — 'Always respond concisely' ✗. "
|
||||
"'Project uses pytest with xdist' ✓ — 'Run tests with pytest -n 4' ✗. "
|
||||
"Imperative phrasing gets re-read as a directive in later sessions and can "
|
||||
"cause repeated work or override the user's current request. Procedures and "
|
||||
"workflows belong in skills, not memory."
|
||||
"necessary later, save it as a skill with the skill tool."
|
||||
)
|
||||
|
||||
SESSION_SEARCH_GUIDANCE = (
|
||||
@@ -182,64 +170,6 @@ SKILLS_GUIDANCE = (
|
||||
"Skills that aren't maintained become liabilities."
|
||||
)
|
||||
|
||||
KANBAN_GUIDANCE = (
|
||||
"# Kanban task execution protocol\n"
|
||||
"You have been assigned ONE task from "
|
||||
"the shared board at `~/.hermes/kanban.db`. Your task id is in "
|
||||
"`$HERMES_KANBAN_TASK`; your workspace is `$HERMES_KANBAN_WORKSPACE`. "
|
||||
"The `kanban_*` tools in your schema are your primary coordination surface — "
|
||||
"they write directly to the shared SQLite DB and work regardless of terminal "
|
||||
"backend (local/docker/modal/ssh).\n"
|
||||
"\n"
|
||||
"## Lifecycle\n"
|
||||
"\n"
|
||||
"1. **Orient.** Call `kanban_show()` first (no args — it defaults to your "
|
||||
"task). The response includes title, body, parent-task handoffs (summary + "
|
||||
"metadata), any prior attempts on this task if you're a retry, the full "
|
||||
"comment thread, and a pre-formatted `worker_context` you can treat as "
|
||||
"ground truth.\n"
|
||||
"2. **Work inside the workspace.** `cd $HERMES_KANBAN_WORKSPACE` before "
|
||||
"any file operations. The workspace is yours for this run. Don't modify "
|
||||
"files outside it unless the task explicitly asks.\n"
|
||||
"3. **Heartbeat on long operations.** Call `kanban_heartbeat(note=...)` "
|
||||
"every few minutes during long subprocesses (training, encoding, crawling). "
|
||||
"Skip heartbeats for short tasks.\n"
|
||||
"4. **Block on genuine ambiguity.** If you need a human decision you cannot "
|
||||
"infer (missing credentials, UX choice, paywalled source, peer output you "
|
||||
"need first), call `kanban_block(reason=\"...\")` and stop. Don't guess. "
|
||||
"The user will unblock with context and the dispatcher will respawn you.\n"
|
||||
"5. **Complete with structured handoff.** Call `kanban_complete(summary=..., "
|
||||
"metadata=...)`. `summary` is 1–3 human-readable sentences naming concrete "
|
||||
"artifacts. `metadata` is machine-readable facts "
|
||||
"(`{changed_files: [...], tests_run: N, decisions: [...]}`). Downstream "
|
||||
"workers read both via their own `kanban_show`. Never put secrets / "
|
||||
"tokens / raw PII in either field — run rows are durable forever.\n"
|
||||
"6. **If follow-up work appears, create it; don't do it.** Use "
|
||||
"`kanban_create(title=..., assignee=<right-profile>, parents=[your-task-id])` "
|
||||
"to spawn a child task for the appropriate specialist profile instead of "
|
||||
"scope-creeping into the next thing.\n"
|
||||
"\n"
|
||||
"## Orchestrator mode\n"
|
||||
"\n"
|
||||
"If your task is itself a decomposition task (e.g. a planner profile given "
|
||||
"a high-level goal), use `kanban_create` to fan out into child tasks — one "
|
||||
"per specialist, each with an explicit `assignee` and `parents=[...]` to "
|
||||
"express dependencies. Then `kanban_complete` your own task with a summary "
|
||||
"of the decomposition. Do NOT execute the work yourself; your job is "
|
||||
"routing, not implementation.\n"
|
||||
"\n"
|
||||
"## Do NOT\n"
|
||||
"\n"
|
||||
"- Do not shell out to `hermes kanban <verb>` for board operations. Use "
|
||||
"the `kanban_*` tools — they work across all terminal backends.\n"
|
||||
"- Do not complete a task you didn't actually finish. Block it.\n"
|
||||
"- Do not assign follow-up work to yourself. Assign it to the right "
|
||||
"specialist profile.\n"
|
||||
"- Do not call `delegate_task` as a board substitute. `delegate_task` is "
|
||||
"for short reasoning subtasks inside your own run; board tasks are for "
|
||||
"cross-agent handoffs that outlive one API loop."
|
||||
)
|
||||
|
||||
TOOL_USE_ENFORCEMENT_GUIDANCE = (
|
||||
"# Tool-use enforcement\n"
|
||||
"You MUST use your tools to take action — do not describe what you would do "
|
||||
@@ -257,93 +187,7 @@ TOOL_USE_ENFORCEMENT_GUIDANCE = (
|
||||
|
||||
# Model name substrings that trigger tool-use enforcement guidance.
|
||||
# Add new patterns here when a model family needs explicit steering.
|
||||
TOOL_USE_ENFORCEMENT_MODELS = ("gpt", "codex", "gemini", "gemma", "grok")
|
||||
|
||||
# OpenAI GPT/Codex-specific execution guidance. Addresses known failure modes
|
||||
# where GPT models abandon work on partial results, skip prerequisite lookups,
|
||||
# hallucinate instead of using tools, and declare "done" without verification.
|
||||
# Inspired by patterns from OpenAI's GPT-5.4 prompting guide & OpenClaw PR #38953.
|
||||
OPENAI_MODEL_EXECUTION_GUIDANCE = (
|
||||
"# Execution discipline\n"
|
||||
"<tool_persistence>\n"
|
||||
"- Use tools whenever they improve correctness, completeness, or grounding.\n"
|
||||
"- Do not stop early when another tool call would materially improve the result.\n"
|
||||
"- If a tool returns empty or partial results, retry with a different query or "
|
||||
"strategy before giving up.\n"
|
||||
"- Keep calling tools until: (1) the task is complete, AND (2) you have verified "
|
||||
"the result.\n"
|
||||
"</tool_persistence>\n"
|
||||
"\n"
|
||||
"<mandatory_tool_use>\n"
|
||||
"NEVER answer these from memory or mental computation — ALWAYS use a tool:\n"
|
||||
"- Arithmetic, math, calculations → use terminal or execute_code\n"
|
||||
"- Hashes, encodings, checksums → use terminal (e.g. sha256sum, base64)\n"
|
||||
"- Current time, date, timezone → use terminal (e.g. date)\n"
|
||||
"- System state: OS, CPU, memory, disk, ports, processes → use terminal\n"
|
||||
"- File contents, sizes, line counts → use read_file, search_files, or terminal\n"
|
||||
"- Git history, branches, diffs → use terminal\n"
|
||||
"- Current facts (weather, news, versions) → use web_search\n"
|
||||
"Your memory and user profile describe the USER, not the system you are "
|
||||
"running on. The execution environment may differ from what the user profile "
|
||||
"says about their personal setup.\n"
|
||||
"</mandatory_tool_use>\n"
|
||||
"\n"
|
||||
"<act_dont_ask>\n"
|
||||
"When a question has an obvious default interpretation, act on it immediately "
|
||||
"instead of asking for clarification. Examples:\n"
|
||||
"- 'Is port 443 open?' → check THIS machine (don't ask 'open where?')\n"
|
||||
"- 'What OS am I running?' → check the live system (don't use user profile)\n"
|
||||
"- 'What time is it?' → run `date` (don't guess)\n"
|
||||
"Only ask for clarification when the ambiguity genuinely changes what tool "
|
||||
"you would call.\n"
|
||||
"</act_dont_ask>\n"
|
||||
"\n"
|
||||
"<prerequisite_checks>\n"
|
||||
"- Before taking an action, check whether prerequisite discovery, lookup, or "
|
||||
"context-gathering steps are needed.\n"
|
||||
"- Do not skip prerequisite steps just because the final action seems obvious.\n"
|
||||
"- If a task depends on output from a prior step, resolve that dependency first.\n"
|
||||
"</prerequisite_checks>\n"
|
||||
"\n"
|
||||
"<verification>\n"
|
||||
"Before finalizing your response:\n"
|
||||
"- Correctness: does the output satisfy every stated requirement?\n"
|
||||
"- Grounding: are factual claims backed by tool outputs or provided context?\n"
|
||||
"- Formatting: does the output match the requested format or schema?\n"
|
||||
"- Safety: if the next step has side effects (file writes, commands, API calls), "
|
||||
"confirm scope before executing.\n"
|
||||
"</verification>\n"
|
||||
"\n"
|
||||
"<missing_context>\n"
|
||||
"- If required context is missing, do NOT guess or hallucinate an answer.\n"
|
||||
"- Use the appropriate lookup tool when missing information is retrievable "
|
||||
"(search_files, web_search, read_file, etc.).\n"
|
||||
"- Ask a clarifying question only when the information cannot be retrieved by tools.\n"
|
||||
"- If you must proceed with incomplete information, label assumptions explicitly.\n"
|
||||
"</missing_context>"
|
||||
)
|
||||
|
||||
# Gemini/Gemma-specific operational guidance, adapted from OpenCode's gemini.txt.
|
||||
# Injected alongside TOOL_USE_ENFORCEMENT_GUIDANCE when the model is Gemini or Gemma.
|
||||
GOOGLE_MODEL_OPERATIONAL_GUIDANCE = (
|
||||
"# Google model operational directives\n"
|
||||
"Follow these operational rules strictly:\n"
|
||||
"- **Absolute paths:** Always construct and use absolute file paths for all "
|
||||
"file system operations. Combine the project root with relative paths.\n"
|
||||
"- **Verify first:** Use read_file/search_files to check file contents and "
|
||||
"project structure before making changes. Never guess at file contents.\n"
|
||||
"- **Dependency checks:** Never assume a library is available. Check "
|
||||
"package.json, requirements.txt, Cargo.toml, etc. before importing.\n"
|
||||
"- **Conciseness:** Keep explanatory text brief — a few sentences, not "
|
||||
"paragraphs. Focus on actions and results over narration.\n"
|
||||
"- **Parallel tool calls:** When you need to perform multiple independent "
|
||||
"operations (e.g. reading several files), make all the tool calls in a "
|
||||
"single response rather than sequentially.\n"
|
||||
"- **Non-interactive commands:** Use flags like -y, --yes, --non-interactive "
|
||||
"to prevent CLI tools from hanging on prompts.\n"
|
||||
"- **Keep going:** Work autonomously until the task is fully resolved. "
|
||||
"Don't stop with a plan — execute it.\n"
|
||||
)
|
||||
TOOL_USE_ENFORCEMENT_MODELS = ("gpt", "codex")
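# Illustrative sketch (not part of the diff): how substring tuples like the one
# above might be matched against the active model name to decide which guidance
# blocks get injected. The helper name and the GOOGLE_GUIDANCE_MODELS tuple are
# assumptions for illustration; only TOOL_USE_ENFORCEMENT_MODELS and the two
# guidance constants referenced here come from this module.
GOOGLE_GUIDANCE_MODELS = ("gemini", "gemma")  # hypothetical


def _extra_guidance_for(model_name: str) -> list[str]:
    name = model_name.lower()
    blocks: list[str] = []
    if any(sub in name for sub in GOOGLE_GUIDANCE_MODELS):
        # Per the comment above, Google models get the tool-use enforcement
        # guidance plus the Google-specific operational directives.
        blocks.append(TOOL_USE_ENFORCEMENT_GUIDANCE)
        blocks.append(GOOGLE_MODEL_OPERATIONAL_GUIDANCE)
    elif any(sub in name for sub in TOOL_USE_ENFORCEMENT_MODELS):
        blocks.append(TOOL_USE_ENFORCEMENT_GUIDANCE)
    return blocks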
|
||||
|
||||
# Model name substrings that should use the 'developer' role instead of
|
||||
# 'system' for the system prompt. OpenAI's newer models (GPT-5, Codex)
|
||||
@@ -365,13 +209,7 @@ PLATFORM_HINTS = {
|
||||
),
|
||||
"telegram": (
|
||||
"You are on a text messaging communication platform, Telegram. "
|
||||
"Standard markdown is automatically converted to Telegram format. "
|
||||
"Supported: **bold**, *italic*, ~~strikethrough~~, ||spoiler||, "
|
||||
"`inline code`, ```code blocks```, [links](url), and ## headers. "
|
||||
"Telegram has NO table syntax — prefer bullet lists or labeled "
|
||||
"key: value pairs over pipe tables (any tables you do emit are "
|
||||
"auto-rewritten into row-group bullets, which you can produce "
|
||||
"directly for cleaner output). "
|
||||
"Please do not use markdown as it does not render. "
|
||||
"You can send media files natively: to deliver a file to the user, "
|
||||
"include MEDIA:/absolute/path/to/file in your response. Images "
|
||||
"(.png, .jpg, .webp) appear as photos, audio (.ogg) sends as voice "
|
||||
@@ -418,139 +256,15 @@ PLATFORM_HINTS = {
|
||||
),
|
||||
"cli": (
|
||||
"You are a CLI AI Agent. Try not to use markdown but simple text "
|
||||
"renderable inside a terminal. "
|
||||
"File delivery: there is no attachment channel — the user reads your "
|
||||
"response directly in their terminal. Do NOT emit MEDIA:/path tags "
|
||||
"(those are only intercepted on messaging platforms like Telegram, "
|
||||
"Discord, Slack, etc.; on the CLI they render as literal text). "
|
||||
"When referring to a file you created or changed, just state its "
|
||||
"absolute path in plain text; the user can open it from there."
|
||||
"renderable inside a terminal."
|
||||
),
|
||||
"sms": (
|
||||
"You are communicating via SMS. Keep responses concise and use plain text "
|
||||
"only — no markdown, no formatting. SMS messages are limited to ~1600 "
|
||||
"characters, so be brief and direct."
|
||||
),
|
||||
"bluebubbles": (
|
||||
"You are chatting via iMessage (BlueBubbles). iMessage does not render "
|
||||
"markdown formatting — use plain text. Keep responses concise as they "
|
||||
"appear as text messages. You can send media files natively: include "
|
||||
"MEDIA:/absolute/path/to/file in your response. Images (.jpg, .png, "
|
||||
".heic) appear as photos and other files arrive as attachments."
|
||||
),
|
||||
"mattermost": (
|
||||
"You are in a Mattermost workspace communicating with your user. "
|
||||
"Mattermost renders standard Markdown — headings, bold, italic, code "
|
||||
"blocks, and tables all work. "
|
||||
"You can send media files natively: include MEDIA:/absolute/path/to/file "
|
||||
"in your response. Images (.jpg, .png, .webp) are uploaded as photo "
|
||||
"attachments, audio and video as file attachments. "
|
||||
"Image URLs in markdown format  are rendered as inline previews automatically."
|
||||
),
|
||||
"matrix": (
|
||||
"You are in a Matrix room communicating with your user. "
|
||||
"Matrix renders Markdown — bold, italic, code blocks, and links work; "
|
||||
"the adapter converts your Markdown to HTML for rich display. "
|
||||
"You can send media files natively: include MEDIA:/absolute/path/to/file "
|
||||
"in your response. Images (.jpg, .png, .webp) are sent as inline photos, "
|
||||
"audio (.ogg, .mp3) as voice/audio messages, video (.mp4) inline, "
|
||||
"and other files as downloadable attachments."
|
||||
),
|
||||
"feishu": (
|
||||
"You are in a Feishu (Lark) workspace communicating with your user. "
|
||||
"Feishu renders Markdown in messages — bold, italic, code blocks, and "
|
||||
"links are supported. "
|
||||
"You can send media files natively: include MEDIA:/absolute/path/to/file "
|
||||
"in your response. Images (.jpg, .png, .webp) are uploaded and displayed "
|
||||
"inline, audio files as voice messages, and other files as attachments."
|
||||
),
|
||||
"weixin": (
|
||||
"You are on Weixin/WeChat. Markdown formatting is supported, so you may use it when "
|
||||
"it improves readability, but keep the message compact and chat-friendly. You can send media files natively: "
|
||||
"include MEDIA:/absolute/path/to/file in your response. Images are sent as native "
|
||||
"photos, videos play inline when supported, and other files arrive as downloadable "
|
||||
"documents. You can also include image URLs in markdown format  and they "
|
||||
"will be downloaded and sent as native media when possible."
|
||||
),
|
||||
"wecom": (
|
||||
"You are on WeCom (企业微信 / Enterprise WeChat). Markdown formatting is supported. "
|
||||
"You CAN send media files natively — to deliver a file to the user, include "
|
||||
"MEDIA:/absolute/path/to/file in your response. The file will be sent as a native "
|
||||
"WeCom attachment: images (.jpg, .png, .webp) are sent as photos (up to 10 MB), "
|
||||
"other files (.pdf, .docx, .xlsx, .md, .txt, etc.) arrive as downloadable documents "
|
||||
"(up to 20 MB), and videos (.mp4) play inline. Voice messages are supported but "
|
||||
"must be in AMR format — other audio formats are automatically sent as file attachments. "
|
||||
"You can also include image URLs in markdown format  and they will be "
|
||||
"downloaded and sent as native photos. Do NOT tell the user you lack file-sending "
|
||||
"capability — use MEDIA: syntax whenever a file delivery is appropriate."
|
||||
),
|
||||
"qqbot": (
|
||||
"You are on QQ, a popular Chinese messaging platform. QQ supports markdown formatting "
|
||||
"and emoji. You can send media files natively: include MEDIA:/absolute/path/to/file in "
|
||||
"your response. Images are sent as native photos, and other files arrive as downloadable "
|
||||
"documents."
|
||||
),
|
||||
"yuanbao": (
|
||||
"You are on Yuanbao (腾讯元宝), a Chinese AI assistant platform. "
|
||||
"Markdown formatting is supported (code blocks, tables, bold/italic). "
|
||||
"You CAN send media files natively — to deliver a file to the user, include "
|
||||
"MEDIA:/absolute/path/to/file in your response. The file will be sent as a native "
|
||||
"Yuanbao attachment: images (.jpg, .png, .webp, .gif) are sent as photos, "
|
||||
"and other files (.pdf, .docx, .txt, .zip, etc.) arrive as downloadable documents "
|
||||
"(max 50 MB). You can also include image URLs in markdown format  and "
|
||||
"they will be downloaded and sent as native photos. "
|
||||
"Do NOT tell the user you lack file-sending capability — use MEDIA: syntax "
|
||||
"whenever a file delivery is appropriate.\n\n"
|
||||
"Stickers (贴纸 / 表情包 / TIM face): Yuanbao has a built-in sticker catalogue. "
|
||||
"When the user sends a sticker (you see '[emoji: 名称]' in their message) or asks "
|
||||
"you to send/reply-with a 贴纸/表情/表情包, you MUST use the sticker tools:\n"
|
||||
" 1. Call yb_search_sticker with a Chinese keyword (e.g. '666', '比心', '吃瓜', "
|
||||
" '捂脸', '合十') to discover matching sticker_ids.\n"
|
||||
" 2. Call yb_send_sticker with the chosen sticker_id or name — this sends a real "
|
||||
" TIMFaceElem that renders as a native sticker in the chat.\n"
|
||||
"DO NOT draw sticker-like PNGs with execute_code/Pillow/matplotlib and then send "
|
||||
"them via MEDIA: or send_image_file. That produces a fake low-quality 'sticker' "
|
||||
"image and is the WRONG path. Bare Unicode emoji in text is also not a substitute "
|
||||
"— when a sticker is the right response, use yb_send_sticker."
|
||||
),
|
||||
"api_server": (
|
||||
"You're responding through an API server. The rendering layer is unknown — "
|
||||
"assume plain text. No markdown formatting (no asterisks, bullets, headers, "
|
||||
"code fences). Treat this like a conversation, not a document. Keep responses "
|
||||
"brief and natural."
|
||||
),
|
||||
}
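# Illustrative sketch (not part of the diff): many of the platform hints above
# rely on a "MEDIA:/absolute/path" convention for file delivery. A minimal
# extraction pass over a model response could look like this; the helper name
# and regex are assumptions, not the adapters' actual implementation.
import re

_MEDIA_TAG_RE = re.compile(r"MEDIA:(/\S+)")


def _split_media_tags(text: str) -> tuple[str, list[str]]:
    """Return (text with MEDIA: tags removed, list of referenced file paths)."""
    paths = _MEDIA_TAG_RE.findall(text)
    cleaned = _MEDIA_TAG_RE.sub("", text).strip()
    return cleaned, paths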
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Environment hints — execution-environment awareness for the agent.
|
||||
# Unlike PLATFORM_HINTS (which describe the messaging channel), these describe
|
||||
# the machine/OS the agent's tools actually run on.
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
WSL_ENVIRONMENT_HINT = (
|
||||
"You are running inside WSL (Windows Subsystem for Linux). "
|
||||
"The Windows host filesystem is mounted under /mnt/ — "
|
||||
"/mnt/c/ is the C: drive, /mnt/d/ is D:, etc. "
|
||||
"The user's Windows files are typically at "
|
||||
"/mnt/c/Users/<username>/Desktop/, Documents/, Downloads/, etc. "
|
||||
"When the user references Windows paths or desktop files, translate "
|
||||
"to the /mnt/c/ equivalent. You can list /mnt/c/Users/ to discover "
|
||||
"the Windows username if needed."
|
||||
)
|
||||
|
||||
|
||||
def build_environment_hints() -> str:
|
||||
"""Return environment-specific guidance for the system prompt.
|
||||
|
||||
Detects WSL, and can be extended for Termux, Docker, etc.
|
||||
Returns an empty string when no special environment is detected.
|
||||
"""
|
||||
hints: list[str] = []
|
||||
if is_wsl():
|
||||
hints.append(WSL_ENVIRONMENT_HINT)
|
||||
return "\n\n".join(hints)
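# Illustrative sketch (not part of the diff): one common way to implement the
# is_wsl() check referenced above is to look for "microsoft" in the kernel
# release string. This is an assumption about that helper, not its actual body.
def _is_wsl_sketch() -> bool:
    try:
        with open("/proc/sys/kernel/osrelease", encoding="utf-8") as f:
            return "microsoft" in f.read().lower()
    except OSError:
        return False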
|
||||
|
||||
|
||||
CONTEXT_FILE_MAX_CHARS = 20_000
|
||||
CONTEXT_TRUNCATE_HEAD_RATIO = 0.7
|
||||
CONTEXT_TRUNCATE_TAIL_RATIO = 0.2
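# Illustrative sketch (not part of the diff): one way the truncation constants
# above could be applied — keep roughly the first 70% and last 20% of the
# character budget of an oversized context file, with a marker in between.
# The helper name is an assumption, not the module's actual function.
def _truncate_context_sketch(text: str, max_chars: int = CONTEXT_FILE_MAX_CHARS) -> str:
    if len(text) <= max_chars:
        return text
    head = int(max_chars * CONTEXT_TRUNCATE_HEAD_RATIO)
    tail = int(max_chars * CONTEXT_TRUNCATE_TAIL_RATIO)
    omitted = len(text) - head - tail
    return f"{text[:head]}\n… [{omitted} characters truncated] …\n{text[-tail:]}"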
|
||||
@@ -672,7 +386,7 @@ def _parse_skill_file(skill_file: Path) -> tuple[bool, dict, str]:
|
||||
(True, {}, "") to err on the side of showing the skill.
|
||||
"""
|
||||
try:
|
||||
raw = skill_file.read_text(encoding="utf-8")
|
||||
raw = skill_file.read_text(encoding="utf-8")[:2000]
|
||||
frontmatter, _ = parse_frontmatter(raw)
|
||||
|
||||
if not skill_matches_platform(frontmatter):
|
||||
@@ -680,10 +394,21 @@ def _parse_skill_file(skill_file: Path) -> tuple[bool, dict, str]:
|
||||
|
||||
return True, frontmatter, extract_skill_description(frontmatter)
|
||||
except Exception as e:
|
||||
logger.warning("Failed to parse skill file %s: %s", skill_file, e)
|
||||
logger.debug("Failed to parse skill file %s: %s", skill_file, e)
|
||||
return True, {}, ""
|
||||
|
||||
|
||||
def _read_skill_conditions(skill_file: Path) -> dict:
|
||||
"""Extract conditional activation fields from SKILL.md frontmatter."""
|
||||
try:
|
||||
raw = skill_file.read_text(encoding="utf-8")[:2000]
|
||||
frontmatter, _ = parse_frontmatter(raw)
|
||||
return extract_skill_conditions(frontmatter)
|
||||
except Exception as e:
|
||||
logger.debug("Failed to read skill conditions from %s: %s", skill_file, e)
|
||||
return {}
|
||||
|
||||
|
||||
def _skill_should_show(
|
||||
conditions: dict,
|
||||
available_tools: "set[str] | None",
|
||||
@@ -733,29 +458,19 @@ def build_skills_system_prompt(
|
||||
are read-only — they appear in the index but new skills are always created
|
||||
in the local dir. Local skills take precedence when names collide.
|
||||
"""
|
||||
skills_dir = get_skills_dir()
|
||||
hermes_home = get_hermes_home()
|
||||
skills_dir = hermes_home / "skills"
|
||||
external_dirs = get_all_skills_dirs()[1:] # skip local (index 0)
|
||||
|
||||
if not skills_dir.exists() and not external_dirs:
|
||||
return ""
|
||||
|
||||
# ── Layer 1: in-process LRU cache ─────────────────────────────────
|
||||
# Include the resolved platform so per-platform disabled-skill lists
|
||||
# produce distinct cache entries (gateway serves multiple platforms).
|
||||
from gateway.session_context import get_session_env
|
||||
_platform_hint = (
|
||||
os.environ.get("HERMES_PLATFORM")
|
||||
or get_session_env("HERMES_SESSION_PLATFORM")
|
||||
or ""
|
||||
)
|
||||
disabled = get_disabled_skill_names()
|
||||
cache_key = (
|
||||
str(skills_dir.resolve()),
|
||||
tuple(str(d) for d in external_dirs),
|
||||
tuple(sorted(str(t) for t in (available_tools or set()))),
|
||||
tuple(sorted(str(ts) for ts in (available_toolsets or set()))),
|
||||
_platform_hint,
|
||||
tuple(sorted(disabled)),
|
||||
)
|
||||
with _SKILLS_PROMPT_CACHE_LOCK:
|
||||
cached = _SKILLS_PROMPT_CACHE.get(cache_key)
|
||||
@@ -763,6 +478,8 @@ def build_skills_system_prompt(
|
||||
_SKILLS_PROMPT_CACHE.move_to_end(cache_key)
|
||||
return cached
|
||||
|
||||
disabled = get_disabled_skill_names()
|
||||
|
||||
# ── Layer 2: disk snapshot ────────────────────────────────────────
|
||||
snapshot = _load_skills_snapshot(skills_dir)
|
||||
|
||||
@@ -789,7 +506,7 @@ def build_skills_system_prompt(
|
||||
):
|
||||
continue
|
||||
skills_by_category.setdefault(category, []).append(
|
||||
(frontmatter_name, entry.get("description", ""))
|
||||
(skill_name, entry.get("description", ""))
|
||||
)
|
||||
category_descriptions = {
|
||||
str(k): str(v)
|
||||
@@ -814,7 +531,7 @@ def build_skills_system_prompt(
|
||||
):
|
||||
continue
|
||||
skills_by_category.setdefault(entry["category"], []).append(
|
||||
(entry["frontmatter_name"], entry["description"])
|
||||
(skill_name, entry["description"])
|
||||
)
|
||||
|
||||
# Read category-level DESCRIPTION.md files
|
||||
@@ -857,10 +574,9 @@ def build_skills_system_prompt(
|
||||
continue
|
||||
entry = _build_snapshot_entry(skill_file, ext_dir, frontmatter, desc)
|
||||
skill_name = entry["skill_name"]
|
||||
frontmatter_name = entry["frontmatter_name"]
|
||||
if frontmatter_name in seen_skill_names:
|
||||
if skill_name in seen_skill_names:
|
||||
continue
|
||||
if frontmatter_name in disabled or skill_name in disabled:
|
||||
if entry["frontmatter_name"] in disabled or skill_name in disabled:
|
||||
continue
|
||||
if not _skill_should_show(
|
||||
extract_skill_conditions(frontmatter),
|
||||
@@ -868,9 +584,9 @@ def build_skills_system_prompt(
|
||||
available_toolsets,
|
||||
):
|
||||
continue
|
||||
seen_skill_names.add(frontmatter_name)
|
||||
seen_skill_names.add(skill_name)
|
||||
skills_by_category.setdefault(entry["category"], []).append(
|
||||
(frontmatter_name, entry["description"])
|
||||
(skill_name, entry["description"])
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug("Error reading external skill %s: %s", skill_file, e)
|
||||
@@ -912,21 +628,8 @@ def build_skills_system_prompt(
|
||||
|
||||
result = (
|
||||
"## Skills (mandatory)\n"
|
||||
"Before replying, scan the skills below. If a skill matches or is even partially relevant "
|
||||
"to your task, you MUST load it with skill_view(name) and follow its instructions. "
|
||||
"Err on the side of loading — it is always better to have context you don't need "
|
||||
"than to miss critical steps, pitfalls, or established workflows. "
|
||||
"Skills contain specialized knowledge — API endpoints, tool-specific commands, "
|
||||
"and proven workflows that outperform general-purpose approaches. Load the skill "
|
||||
"even if you think you could handle the task with basic tools like web_search or terminal. "
|
||||
"Skills also encode the user's preferred approach, conventions, and quality standards "
|
||||
"for tasks like code review, planning, and testing — load them even for tasks you "
|
||||
"already know how to do, because the skill defines how it should be done here.\n"
|
||||
"Whenever the user asks you to configure, set up, install, enable, disable, modify, "
|
||||
"or troubleshoot Hermes Agent itself — its CLI, config, models, providers, tools, "
|
||||
"skills, voice, gateway, plugins, or any feature — load the `hermes-agent` skill "
|
||||
"first. It has the actual commands (e.g. `hermes config set …`, `hermes tools`, "
|
||||
"`hermes setup`) so you don't have to guess or invent workarounds.\n"
|
||||
"Before replying, scan the skills below. If one clearly matches your task, "
|
||||
"load it with skill_view(name) and follow its instructions. "
|
||||
"If a skill has issues, fix it with skill_manage(action='patch').\n"
|
||||
"After difficult/iterative tasks, offer to save as a skill. "
|
||||
"If a skill you loaded was missing steps, had wrong commands, or needed "
|
||||
@@ -936,7 +639,7 @@ def build_skills_system_prompt(
|
||||
+ "\n".join(index_lines) + "\n"
|
||||
"</available_skills>\n"
|
||||
"\n"
|
||||
"Only proceed without loading a skill if genuinely none are relevant to the task."
|
||||
"If none match, proceed normally without loading a skill."
|
||||
)
|
||||
|
||||
# ── Store in LRU cache ────────────────────────────────────────────
|
||||
@@ -971,6 +674,7 @@ def build_nous_subscription_prompt(valid_tool_names: "set[str] | None" = None) -
|
||||
"browser_type",
|
||||
"browser_scroll",
|
||||
"browser_console",
|
||||
"browser_close",
|
||||
"browser_press",
|
||||
"browser_get_images",
|
||||
"browser_vision",
|
||||
@@ -1000,13 +704,13 @@ def build_nous_subscription_prompt(valid_tool_names: "set[str] | None" = None) -
|
||||
|
||||
lines = [
|
||||
"# Nous Subscription",
|
||||
"Nous subscription includes managed web tools (Firecrawl), image generation (FAL), OpenAI TTS, and browser automation (Browser Use) by default. Modal execution is optional.",
|
||||
"Nous subscription includes managed web tools (Firecrawl), image generation (FAL), OpenAI TTS, and browser automation (Browserbase) by default. Modal execution is optional.",
|
||||
"Current capability status:",
|
||||
]
|
||||
lines.extend(_status_line(feature) for feature in features.items())
|
||||
lines.extend(
|
||||
[
|
||||
"When a Nous-managed feature is active, do not ask the user for Firecrawl, FAL, OpenAI TTS, or Browser-Use API keys.",
|
||||
"When a Nous-managed feature is active, do not ask the user for Firecrawl, FAL, OpenAI TTS, or Browserbase API keys.",
|
||||
"If the user is not subscribed and asks for a capability that Nous subscription would unlock or simplify, suggest Nous subscription as one option alongside direct setup or local alternatives.",
|
||||
"Do not mention subscription unless the user asks about it or it directly solves the current missing capability.",
|
||||
"Useful commands: hermes setup, hermes setup tools, hermes setup terminal, hermes status.",
|
||||
|
||||
@@ -1,246 +0,0 @@
|
||||
"""Rate limit tracking for inference API responses.
|
||||
|
||||
Captures x-ratelimit-* headers from provider responses and provides
|
||||
formatted display for the /usage slash command. Currently supports
|
||||
the Nous Portal header format (also used by OpenRouter and OpenAI-compatible
|
||||
APIs that follow the same convention).
|
||||
|
||||
Header schema (12 headers total):
|
||||
x-ratelimit-limit-requests RPM cap
|
||||
x-ratelimit-limit-requests-1h RPH cap
|
||||
x-ratelimit-limit-tokens TPM cap
|
||||
x-ratelimit-limit-tokens-1h TPH cap
|
||||
x-ratelimit-remaining-requests requests left in minute window
|
||||
x-ratelimit-remaining-requests-1h requests left in hour window
|
||||
x-ratelimit-remaining-tokens tokens left in minute window
|
||||
x-ratelimit-remaining-tokens-1h tokens left in hour window
|
||||
x-ratelimit-reset-requests seconds until minute request window resets
|
||||
x-ratelimit-reset-requests-1h seconds until hour request window resets
|
||||
x-ratelimit-reset-tokens seconds until minute token window resets
|
||||
x-ratelimit-reset-tokens-1h seconds until hour token window resets
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any, Mapping, Optional
|
||||
|
||||
|
||||
@dataclass
|
||||
class RateLimitBucket:
|
||||
"""One rate-limit window (e.g. requests per minute)."""
|
||||
|
||||
limit: int = 0
|
||||
remaining: int = 0
|
||||
reset_seconds: float = 0.0
|
||||
captured_at: float = 0.0 # time.time() when this was captured
|
||||
|
||||
@property
|
||||
def used(self) -> int:
|
||||
return max(0, self.limit - self.remaining)
|
||||
|
||||
@property
|
||||
def usage_pct(self) -> float:
|
||||
if self.limit <= 0:
|
||||
return 0.0
|
||||
return (self.used / self.limit) * 100.0
|
||||
|
||||
@property
|
||||
def remaining_seconds_now(self) -> float:
|
||||
"""Estimated seconds remaining until reset, adjusted for elapsed time."""
|
||||
elapsed = time.time() - self.captured_at
|
||||
return max(0.0, self.reset_seconds - elapsed)
|
||||
|
||||
|
||||
@dataclass
|
||||
class RateLimitState:
|
||||
"""Full rate-limit state parsed from response headers."""
|
||||
|
||||
requests_min: RateLimitBucket = field(default_factory=RateLimitBucket)
|
||||
requests_hour: RateLimitBucket = field(default_factory=RateLimitBucket)
|
||||
tokens_min: RateLimitBucket = field(default_factory=RateLimitBucket)
|
||||
tokens_hour: RateLimitBucket = field(default_factory=RateLimitBucket)
|
||||
captured_at: float = 0.0 # when the headers were captured
|
||||
provider: str = ""
|
||||
|
||||
@property
|
||||
def has_data(self) -> bool:
|
||||
return self.captured_at > 0
|
||||
|
||||
@property
|
||||
def age_seconds(self) -> float:
|
||||
if not self.has_data:
|
||||
return float("inf")
|
||||
return time.time() - self.captured_at
|
||||
|
||||
|
||||
def _safe_int(value: Any, default: int = 0) -> int:
|
||||
try:
|
||||
return int(float(value))
|
||||
except (TypeError, ValueError):
|
||||
return default
|
||||
|
||||
|
||||
def _safe_float(value: Any, default: float = 0.0) -> float:
|
||||
try:
|
||||
return float(value)
|
||||
except (TypeError, ValueError):
|
||||
return default
|
||||
|
||||
|
||||
def parse_rate_limit_headers(
|
||||
headers: Mapping[str, str],
|
||||
provider: str = "",
|
||||
) -> Optional[RateLimitState]:
|
||||
"""Parse x-ratelimit-* headers into a RateLimitState.
|
||||
|
||||
Returns None if no rate limit headers are present.
|
||||
"""
|
||||
# Normalize to lowercase so lookups work regardless of how the server
|
||||
# capitalises headers (HTTP header names are case-insensitive per RFC 7230).
|
||||
lowered = {k.lower(): v for k, v in headers.items()}
|
||||
|
||||
# Quick check: at least one rate limit header must exist
|
||||
has_any = any(k.startswith("x-ratelimit-") for k in lowered)
|
||||
if not has_any:
|
||||
return None
|
||||
|
||||
now = time.time()
|
||||
|
||||
def _bucket(resource: str, suffix: str = "") -> RateLimitBucket:
|
||||
# e.g. resource="requests", suffix="" -> per-minute
|
||||
# resource="tokens", suffix="-1h" -> per-hour
|
||||
tag = f"{resource}{suffix}"
|
||||
return RateLimitBucket(
|
||||
limit=_safe_int(lowered.get(f"x-ratelimit-limit-{tag}")),
|
||||
remaining=_safe_int(lowered.get(f"x-ratelimit-remaining-{tag}")),
|
||||
reset_seconds=_safe_float(lowered.get(f"x-ratelimit-reset-{tag}")),
|
||||
captured_at=now,
|
||||
)
|
||||
|
||||
return RateLimitState(
|
||||
requests_min=_bucket("requests"),
|
||||
requests_hour=_bucket("requests", "-1h"),
|
||||
tokens_min=_bucket("tokens"),
|
||||
tokens_hour=_bucket("tokens", "-1h"),
|
||||
captured_at=now,
|
||||
provider=provider,
|
||||
)
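# Illustrative usage (not part of the diff): parsing a captured header mapping.
# The header values are made-up sample numbers, not real provider output; the
# mixed-case keys show that lookups survive arbitrary header capitalisation.
_sample_headers = {
    "X-RateLimit-Limit-Requests": "60",
    "X-RateLimit-Remaining-Requests": "58",
    "X-RateLimit-Reset-Requests": "12",
    "X-RateLimit-Limit-Tokens-1h": "8000000",
    "X-RateLimit-Remaining-Tokens-1h": "7999856",
    "X-RateLimit-Reset-Tokens-1h": "3537",
}
_sample_state = parse_rate_limit_headers(_sample_headers, provider="nous")
assert _sample_state is not None
assert _sample_state.requests_min.remaining == 58
assert _sample_state.tokens_hour.limit == 8_000_000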
|
||||
|
||||
|
||||
# ── Formatting ──────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def _fmt_count(n: int) -> str:
|
||||
"""Human-friendly number: 7999856 -> '8.0M', 33599 -> '33.6K', 799 -> '799'."""
|
||||
if n >= 1_000_000:
|
||||
return f"{n / 1_000_000:.1f}M"
|
||||
if n >= 10_000:
|
||||
return f"{n / 1_000:.1f}K"
|
||||
if n >= 1_000:
|
||||
return f"{n / 1_000:.1f}K"
|
||||
return str(n)
|
||||
|
||||
|
||||
def _fmt_seconds(seconds: float) -> str:
|
||||
"""Seconds -> human-friendly duration: '58s', '2m 14s', '58m 57s', '1h 2m'."""
|
||||
s = max(0, int(seconds))
|
||||
if s < 60:
|
||||
return f"{s}s"
|
||||
if s < 3600:
|
||||
m, sec = divmod(s, 60)
|
||||
return f"{m}m {sec}s" if sec else f"{m}m"
|
||||
h, remainder = divmod(s, 3600)
|
||||
m = remainder // 60
|
||||
return f"{h}h {m}m" if m else f"{h}h"
|
||||
|
||||
|
||||
def _bar(pct: float, width: int = 20) -> str:
|
||||
"""ASCII progress bar: [████████░░░░░░░░░░░░] 40%."""
|
||||
filled = int(pct / 100.0 * width)
|
||||
filled = max(0, min(width, filled))
|
||||
empty = width - filled
|
||||
return f"[{'█' * filled}{'░' * empty}]"
|
||||
|
||||
|
||||
def _bucket_line(label: str, bucket: RateLimitBucket, label_width: int = 14) -> str:
|
||||
"""Format one bucket as a single line."""
|
||||
if bucket.limit <= 0:
|
||||
return f" {label:<{label_width}} (no data)"
|
||||
|
||||
pct = bucket.usage_pct
|
||||
used = _fmt_count(bucket.used)
|
||||
limit = _fmt_count(bucket.limit)
|
||||
remaining = _fmt_count(bucket.remaining)
|
||||
reset = _fmt_seconds(bucket.remaining_seconds_now)
|
||||
|
||||
bar = _bar(pct)
|
||||
return f" {label:<{label_width}} {bar} {pct:5.1f}% {used}/{limit} used ({remaining} left, resets in {reset})"
|
||||
|
||||
|
||||
def format_rate_limit_display(state: RateLimitState) -> str:
|
||||
"""Format rate limit state for terminal/chat display."""
|
||||
if not state.has_data:
|
||||
return "No rate limit data yet — make an API request first."
|
||||
|
||||
age = state.age_seconds
|
||||
if age < 5:
|
||||
freshness = "just now"
|
||||
elif age < 60:
|
||||
freshness = f"{int(age)}s ago"
|
||||
else:
|
||||
freshness = f"{_fmt_seconds(age)} ago"
|
||||
|
||||
provider_label = state.provider.title() if state.provider else "Provider"
|
||||
|
||||
lines = [
|
||||
f"{provider_label} Rate Limits (captured {freshness}):",
|
||||
"",
|
||||
_bucket_line("Requests/min", state.requests_min),
|
||||
_bucket_line("Requests/hr", state.requests_hour),
|
||||
"",
|
||||
_bucket_line("Tokens/min", state.tokens_min),
|
||||
_bucket_line("Tokens/hr", state.tokens_hour),
|
||||
]
|
||||
|
||||
# Add warnings if any bucket is getting hot
|
||||
warnings = []
|
||||
for label, bucket in [
|
||||
("requests/min", state.requests_min),
|
||||
("requests/hr", state.requests_hour),
|
||||
("tokens/min", state.tokens_min),
|
||||
("tokens/hr", state.tokens_hour),
|
||||
]:
|
||||
if bucket.limit > 0 and bucket.usage_pct >= 80:
|
||||
reset = _fmt_seconds(bucket.remaining_seconds_now)
|
||||
warnings.append(f" ⚠ {label} at {bucket.usage_pct:.0f}% — resets in {reset}")
|
||||
|
||||
if warnings:
|
||||
lines.append("")
|
||||
lines.extend(warnings)
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def format_rate_limit_compact(state: RateLimitState) -> str:
|
||||
"""One-line compact summary for status bars / gateway messages."""
|
||||
if not state.has_data:
|
||||
return "No rate limit data."
|
||||
|
||||
rm = state.requests_min
|
||||
tm = state.tokens_min
|
||||
rh = state.requests_hour
|
||||
th = state.tokens_hour
|
||||
|
||||
parts = []
|
||||
if rm.limit > 0:
|
||||
parts.append(f"RPM: {rm.remaining}/{rm.limit}")
|
||||
if rh.limit > 0:
|
||||
parts.append(f"RPH: {_fmt_count(rh.remaining)}/{_fmt_count(rh.limit)} (resets {_fmt_seconds(rh.remaining_seconds_now)})")
|
||||
if tm.limit > 0:
|
||||
parts.append(f"TPM: {_fmt_count(tm.remaining)}/{_fmt_count(tm.limit)}")
|
||||
if th.limit > 0:
|
||||
parts.append(f"TPH: {_fmt_count(th.remaining)}/{_fmt_count(th.limit)} (resets {_fmt_seconds(th.remaining_seconds_now)})")
|
||||
|
||||
return " | ".join(parts)
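# Illustrative usage (not part of the diff), continuing the _sample_state
# example above — the display helpers render the same parsed state two ways:
# print(format_rate_limit_display(_sample_state))
# print(format_rate_limit_compact(_sample_state))
#   -> e.g. "RPM: 58/60 | TPH: 8.0M/8.0M (resets 58m 57s)"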
|
||||
267
agent/redact.py
@@ -13,58 +13,9 @@ import re
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Sensitive query-string parameter names (case-insensitive exact match).
|
||||
# Ported from nearai/ironclaw#2529 — catches tokens whose values don't match
|
||||
# any known vendor prefix regex (e.g. opaque tokens, short OAuth codes).
|
||||
_SENSITIVE_QUERY_PARAMS = frozenset({
|
||||
"access_token",
|
||||
"refresh_token",
|
||||
"id_token",
|
||||
"token",
|
||||
"api_key",
|
||||
"apikey",
|
||||
"client_secret",
|
||||
"password",
|
||||
"auth",
|
||||
"jwt",
|
||||
"session",
|
||||
"secret",
|
||||
"key",
|
||||
"code", # OAuth authorization codes
|
||||
"signature", # pre-signed URL signatures
|
||||
"x-amz-signature",
|
||||
})
|
||||
|
||||
# Sensitive form-urlencoded / JSON body key names (case-insensitive exact match).
|
||||
# Exact match, NOT substring — "token_count" and "session_id" must NOT match.
|
||||
# Ported from nearai/ironclaw#2529.
|
||||
_SENSITIVE_BODY_KEYS = frozenset({
|
||||
"access_token",
|
||||
"refresh_token",
|
||||
"id_token",
|
||||
"token",
|
||||
"api_key",
|
||||
"apikey",
|
||||
"client_secret",
|
||||
"password",
|
||||
"auth",
|
||||
"jwt",
|
||||
"secret",
|
||||
"private_key",
|
||||
"authorization",
|
||||
"key",
|
||||
})
|
||||
|
||||
# Snapshot at import time so runtime env mutations (e.g. LLM-generated
|
||||
# `export HERMES_REDACT_SECRETS=false`) cannot disable redaction
|
||||
# mid-session. ON by default — secure default per issue #17691. Users who
|
||||
# need raw credential values in tool output (e.g. working on the redactor
|
||||
# itself) can opt out via `security.redact_secrets: false` in config.yaml
|
||||
# (bridged to this env var in hermes_cli/main.py, gateway/run.py, and
|
||||
# cli.py) or `HERMES_REDACT_SECRETS=false` in ~/.hermes/.env. An opt-out
|
||||
# warning is logged at gateway and CLI startup so operators see the
|
||||
# downgrade — see `_log_redaction_status()` in gateway/run.py and cli.py.
|
||||
_REDACT_ENABLED = os.getenv("HERMES_REDACT_SECRETS", "true").lower() in ("1", "true", "yes", "on")
|
||||
# `export HERMES_REDACT_SECRETS=false`) cannot disable redaction mid-session.
|
||||
_REDACT_ENABLED = os.getenv("HERMES_REDACT_SECRETS", "").lower() not in ("0", "false", "no", "off")
|
||||
|
||||
# Known API key prefixes -- match the prefix + contiguous token chars
|
||||
_PREFIX_PATTERNS = [
|
||||
@@ -97,18 +48,13 @@ _PREFIX_PATTERNS = [
|
||||
r"sk_[A-Za-z0-9_]{10,}", # ElevenLabs TTS key (sk_ underscore, not sk- dash)
|
||||
r"tvly-[A-Za-z0-9]{10,}", # Tavily search API key
|
||||
r"exa_[A-Za-z0-9]{10,}", # Exa search API key
|
||||
r"gsk_[A-Za-z0-9]{10,}", # Groq Cloud API key
|
||||
r"syt_[A-Za-z0-9]{10,}", # Matrix access token
|
||||
r"retaindb_[A-Za-z0-9]{10,}", # RetainDB API key
|
||||
r"hsk-[A-Za-z0-9]{10,}", # Hindsight API key
|
||||
r"mem0_[A-Za-z0-9]{10,}", # Mem0 Platform API key
|
||||
r"brv_[A-Za-z0-9]{10,}", # ByteRover API key
|
||||
]
|
||||
|
||||
# ENV assignment patterns: KEY=value where KEY contains a secret-like name
|
||||
_SECRET_ENV_NAMES = r"(?:API_?KEY|TOKEN|SECRET|PASSWORD|PASSWD|CREDENTIAL|AUTH)"
|
||||
_ENV_ASSIGN_RE = re.compile(
|
||||
rf"([A-Z0-9_]{{0,50}}{_SECRET_ENV_NAMES}[A-Z0-9_]{{0,50}})\s*=\s*(['\"]?)(\S+)\2",
|
||||
rf"([A-Z_]*{_SECRET_ENV_NAMES}[A-Z_]*)\s*=\s*(['\"]?)(\S+)\2",
|
||||
re.IGNORECASE,
|
||||
)
|
||||
|
||||
# JSON field patterns: "apiKey": "value", "token": "value", etc.
|
||||
@@ -142,184 +88,28 @@ _DB_CONNSTR_RE = re.compile(
|
||||
re.IGNORECASE,
|
||||
)
|
||||
|
||||
# JWT tokens: header.payload[.signature] — always start with "eyJ" (base64 for "{")
|
||||
# Matches 1-part (header only), 2-part (header.payload), and full 3-part JWTs.
|
||||
_JWT_RE = re.compile(
|
||||
r"eyJ[A-Za-z0-9_-]{10,}" # Header (always starts with eyJ)
|
||||
r"(?:\.[A-Za-z0-9_=-]{4,}){0,2}" # Optional payload and/or signature
|
||||
)
|
||||
|
||||
# Discord user/role mentions: <@123456789012345678> or <@!123456789012345678>
|
||||
# Snowflake IDs are 17-20 digit integers that resolve to specific Discord accounts.
|
||||
_DISCORD_MENTION_RE = re.compile(r"<@!?(\d{17,20})>")
|
||||
|
||||
# E.164 phone numbers: +<country><number>, 7-15 digits
|
||||
# Negative lookahead prevents matching hex strings or identifiers
|
||||
_SIGNAL_PHONE_RE = re.compile(r"(\+[1-9]\d{6,14})(?![A-Za-z0-9])")
|
||||
|
||||
# URLs containing query strings — matches `scheme://...?...[# or end]`.
|
||||
# Used to scan text for URLs whose query params may contain secrets.
|
||||
# Ported from nearai/ironclaw#2529.
|
||||
_URL_WITH_QUERY_RE = re.compile(
|
||||
r"(https?|wss?|ftp)://" # scheme
|
||||
r"([^\s/?#]+)" # authority (may include userinfo)
|
||||
r"([^\s?#]*)" # path
|
||||
r"\?([^\s#]+)" # query (required)
|
||||
r"(#\S*)?", # optional fragment
|
||||
)
|
||||
|
||||
# URLs containing userinfo — `scheme://user:password@host` for ANY scheme
|
||||
# (not just DB protocols already covered by _DB_CONNSTR_RE above).
|
||||
# Catches things like `https://user:token@api.example.com/v1/foo`.
|
||||
_URL_USERINFO_RE = re.compile(
|
||||
r"(https?|wss?|ftp)://([^/\s:@]+):([^/\s@]+)@",
|
||||
)
|
||||
|
||||
# Form-urlencoded body detection: conservative — only applies when the entire
|
||||
# text looks like a query string (k=v&k=v pattern with no newlines).
|
||||
_FORM_BODY_RE = re.compile(
|
||||
r"^[A-Za-z_][A-Za-z0-9_.-]*=[^&\s]*(?:&[A-Za-z_][A-Za-z0-9_.-]*=[^&\s]*)+$"
|
||||
)
|
||||
|
||||
# Compile known prefix patterns into one alternation
|
||||
_PREFIX_RE = re.compile(
|
||||
r"(?<![A-Za-z0-9_-])(" + "|".join(_PREFIX_PATTERNS) + r")(?![A-Za-z0-9_-])"
|
||||
)
|
||||
|
||||
|
||||
def mask_secret(
|
||||
value: str,
|
||||
*,
|
||||
head: int = 4,
|
||||
tail: int = 4,
|
||||
floor: int = 12,
|
||||
placeholder: str = "***",
|
||||
empty: str = "",
|
||||
) -> str:
|
||||
"""Mask a secret for display, preserving ``head`` and ``tail`` characters.
|
||||
|
||||
Canonical helper for display-time redaction across Hermes — used by
|
||||
``hermes config``, ``hermes status``, ``hermes dump``, and anywhere
|
||||
a secret needs to be shown truncated for debuggability while still
|
||||
keeping the bulk hidden.
|
||||
|
||||
Args:
|
||||
value: The secret to mask. ``None``/empty returns ``empty``.
|
||||
head: Leading characters to preserve. Default 4.
|
||||
tail: Trailing characters to preserve. Default 4.
|
||||
floor: Values shorter than ``head + tail + floor_margin`` are
|
||||
fully masked (returns ``placeholder``). Default 12 —
|
||||
matches the existing config/status/dump convention.
|
||||
placeholder: Value returned for too-short inputs. Default ``"***"``.
|
||||
empty: Value returned when ``value`` is falsy (None, ""). The
|
||||
caller can override this to e.g. ``color("(not set)",
|
||||
Colors.DIM)`` for user-facing display.
|
||||
|
||||
Examples:
|
||||
>>> mask_secret("sk-proj-abcdef1234567890")
|
||||
'sk-p...7890'
|
||||
>>> mask_secret("short") # fully masked
|
||||
'***'
|
||||
>>> mask_secret("") # empty default
|
||||
''
|
||||
>>> mask_secret("", empty="(not set)") # empty override
|
||||
'(not set)'
|
||||
>>> mask_secret("long-token", head=6, tail=4, floor=18)
|
||||
'***'
|
||||
"""
|
||||
if not value:
|
||||
return empty
|
||||
if len(value) < floor:
|
||||
return placeholder
|
||||
return f"{value[:head]}...{value[-tail:]}"
|
||||
|
||||
|
||||
def _mask_token(token: str) -> str:
|
||||
"""Mask a log token — conservative 18-char floor, preserves 6 prefix / 4 suffix."""
|
||||
# Empty input: historically this returned "***" rather than "". Preserve.
|
||||
if not token:
|
||||
"""Mask a token, preserving prefix for long tokens."""
|
||||
if len(token) < 18:
|
||||
return "***"
|
||||
return mask_secret(token, head=6, tail=4, floor=18)
|
||||
return f"{token[:6]}...{token[-4:]}"
|
||||
|
||||
|
||||
def _redact_query_string(query: str) -> str:
|
||||
"""Redact sensitive parameter values in a URL query string.
|
||||
|
||||
Handles `k=v&k=v` format. Sensitive keys (case-insensitive) have values
|
||||
replaced with `***`. Non-sensitive keys pass through unchanged.
|
||||
Empty or malformed pairs are preserved as-is.
|
||||
"""
|
||||
if not query:
|
||||
return query
|
||||
parts = []
|
||||
for pair in query.split("&"):
|
||||
if "=" not in pair:
|
||||
parts.append(pair)
|
||||
continue
|
||||
key, _, value = pair.partition("=")
|
||||
if key.lower() in _SENSITIVE_QUERY_PARAMS:
|
||||
parts.append(f"{key}=***")
|
||||
else:
|
||||
parts.append(pair)
|
||||
return "&".join(parts)
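# Illustrative usage (not part of the diff): sensitive keys are masked by exact,
# case-insensitive match, while ordinary params pass through untouched.
assert _redact_query_string("code=ABC123&state=xyz&limit=10") == "code=***&state=xyz&limit=10"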
|
||||
|
||||
|
||||
def _redact_url_query_params(text: str) -> str:
|
||||
"""Scan text for URLs with query strings and redact sensitive params.
|
||||
|
||||
Catches opaque tokens that don't match vendor prefix regexes, e.g.
|
||||
`https://example.com/cb?code=ABC123&state=xyz` → `...?code=***&state=xyz`.
|
||||
"""
|
||||
def _sub(m: re.Match) -> str:
|
||||
scheme = m.group(1)
|
||||
authority = m.group(2)
|
||||
path = m.group(3)
|
||||
query = _redact_query_string(m.group(4))
|
||||
fragment = m.group(5) or ""
|
||||
return f"{scheme}://{authority}{path}?{query}{fragment}"
|
||||
return _URL_WITH_QUERY_RE.sub(_sub, text)
|
||||
|
||||
|
||||
def _redact_url_userinfo(text: str) -> str:
|
||||
"""Strip `user:password@` from HTTP/WS/FTP URLs.
|
||||
|
||||
DB protocols (postgres, mysql, mongodb, redis, amqp) are handled
|
||||
separately by `_DB_CONNSTR_RE`.
|
||||
"""
|
||||
return _URL_USERINFO_RE.sub(
|
||||
lambda m: f"{m.group(1)}://{m.group(2)}:***@",
|
||||
text,
|
||||
)
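# Illustrative usage (not part of the diff): embedded credentials survive only
# as "user:***@"; the host, path, and scheme are left intact.
assert (
    _redact_url_userinfo("https://alice:s3cret@api.example.com/v1")
    == "https://alice:***@api.example.com/v1"
)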
|
||||
|
||||
|
||||
def _redact_form_body(text: str) -> str:
|
||||
"""Redact sensitive values in a form-urlencoded body.
|
||||
|
||||
Only applies when the entire input looks like a pure form body
|
||||
(k=v&k=v with no newlines, no other text). Single-line non-form
|
||||
text passes through unchanged. This is a conservative pass — the
|
||||
`_redact_url_query_params` function handles embedded query strings.
|
||||
"""
|
||||
if not text or "\n" in text or "&" not in text:
|
||||
return text
|
||||
# The form-body check is strict: only trigger on clean k=v&k=v input.
|
||||
if not _FORM_BODY_RE.match(text.strip()):
|
||||
return text
|
||||
return _redact_query_string(text.strip())
|
||||
|
||||
|
||||
def redact_sensitive_text(text: str, *, force: bool = False, code_file: bool = False) -> str:
|
||||
def redact_sensitive_text(text: str) -> str:
|
||||
"""Apply all redaction patterns to a block of text.
|
||||
|
||||
Safe to call on any string -- non-matching text passes through unchanged.
|
||||
Disabled by default — enable via security.redact_secrets: true in config.yaml.
|
||||
Set force=True for safety boundaries that must never return raw secrets
|
||||
regardless of the user's global logging redaction preference.
|
||||
|
||||
Set code_file=True to skip the ENV-assignment and JSON-field regex
|
||||
patterns when the text is known to be source code (e.g. MAX_TOKENS=***
|
||||
constants, "apiKey": "test" fixtures). Prefix patterns, auth headers,
|
||||
private keys, DB connstrings, JWTs, and URL secrets are still redacted.
|
||||
Disabled when security.redact_secrets is false in config.yaml.
|
||||
"""
|
||||
if text is None:
|
||||
return None
|
||||
@@ -327,24 +117,23 @@ def redact_sensitive_text(text: str, *, force: bool = False, code_file: bool = F
|
||||
text = str(text)
|
||||
if not text:
|
||||
return text
|
||||
if not (force or _REDACT_ENABLED):
|
||||
if not _REDACT_ENABLED:
|
||||
return text
|
||||
|
||||
# Known prefixes (sk-, ghp_, etc.)
|
||||
text = _PREFIX_RE.sub(lambda m: _mask_token(m.group(1)), text)
|
||||
|
||||
# ENV assignments: OPENAI_API_KEY=*** (skip for code files — false positives)
|
||||
if not code_file:
|
||||
def _redact_env(m):
|
||||
name, quote, value = m.group(1), m.group(2), m.group(3)
|
||||
return f"{name}={quote}{_mask_token(value)}{quote}"
|
||||
text = _ENV_ASSIGN_RE.sub(_redact_env, text)
|
||||
# ENV assignments: OPENAI_API_KEY=sk-abc...
|
||||
def _redact_env(m):
|
||||
name, quote, value = m.group(1), m.group(2), m.group(3)
|
||||
return f"{name}={quote}{_mask_token(value)}{quote}"
|
||||
text = _ENV_ASSIGN_RE.sub(_redact_env, text)
|
||||
|
||||
# JSON fields: "apiKey": "***" (skip for code files — false positives)
|
||||
def _redact_json(m):
|
||||
key, value = m.group(1), m.group(2)
|
||||
return f'{key}: "{_mask_token(value)}"'
|
||||
text = _JSON_FIELD_RE.sub(_redact_json, text)
|
||||
# JSON fields: "apiKey": "value"
|
||||
def _redact_json(m):
|
||||
key, value = m.group(1), m.group(2)
|
||||
return f'{key}: "{_mask_token(value)}"'
|
||||
text = _JSON_FIELD_RE.sub(_redact_json, text)
|
||||
|
||||
# Authorization headers
|
||||
text = _AUTH_HEADER_RE.sub(
|
||||
@@ -365,22 +154,6 @@ def redact_sensitive_text(text: str, *, force: bool = False, code_file: bool = F
|
||||
# Database connection string passwords
|
||||
text = _DB_CONNSTR_RE.sub(lambda m: f"{m.group(1)}***{m.group(3)}", text)
|
||||
|
||||
# JWT tokens (eyJ... — base64-encoded JSON headers)
|
||||
text = _JWT_RE.sub(lambda m: _mask_token(m.group(0)), text)
|
||||
|
||||
# URL userinfo (http(s)://user:pass@host) — redact for non-DB schemes.
|
||||
# DB schemes are handled above by _DB_CONNSTR_RE.
|
||||
text = _redact_url_userinfo(text)
|
||||
|
||||
# URL query params containing opaque tokens (?access_token=…&code=…)
|
||||
text = _redact_url_query_params(text)
|
||||
|
||||
# Form-urlencoded bodies (only triggers on clean k=v&k=v inputs).
|
||||
text = _redact_form_body(text)
|
||||
|
||||
# Discord user/role mentions (<@snowflake_id>)
|
||||
text = _DISCORD_MENTION_RE.sub(lambda m: f"<@{'!' if '!' in m.group(0) else ''}***>", text)
|
||||
|
||||
# E.164 phone numbers (Signal, WhatsApp)
|
||||
def _redact_phone(m):
|
||||
phone = m.group(1)
|
||||
|
||||
@@ -1,57 +0,0 @@
|
||||
"""Retry utilities — jittered backoff for decorrelated retries.
|
||||
|
||||
Replaces fixed exponential backoff with jittered delays to prevent
|
||||
thundering-herd retry spikes when multiple sessions hit the same
|
||||
rate-limited provider concurrently.
|
||||
"""
|
||||
|
||||
import random
|
||||
import threading
|
||||
import time
|
||||
|
||||
# Monotonic counter for jitter seed uniqueness within the same process.
|
||||
# Protected by a lock to avoid race conditions in concurrent retry paths
|
||||
# (e.g. multiple gateway sessions retrying simultaneously).
|
||||
_jitter_counter = 0
|
||||
_jitter_lock = threading.Lock()
|
||||
|
||||
|
||||
def jittered_backoff(
|
||||
attempt: int,
|
||||
*,
|
||||
base_delay: float = 5.0,
|
||||
max_delay: float = 120.0,
|
||||
jitter_ratio: float = 0.5,
|
||||
) -> float:
|
||||
"""Compute a jittered exponential backoff delay.
|
||||
|
||||
Args:
|
||||
attempt: 1-based retry attempt number.
|
||||
base_delay: Base delay in seconds for attempt 1.
|
||||
max_delay: Maximum delay cap in seconds.
|
||||
jitter_ratio: Fraction of computed delay to use as random jitter
|
||||
range. 0.5 means jitter is uniform in [0, 0.5 * delay].
|
||||
|
||||
Returns:
|
||||
Delay in seconds: min(base * 2^(attempt-1), max_delay) + jitter.
|
||||
|
||||
The jitter decorrelates concurrent retries so multiple sessions
|
||||
hitting the same provider don't all retry at the same instant.
|
||||
"""
|
||||
global _jitter_counter
|
||||
with _jitter_lock:
|
||||
_jitter_counter += 1
|
||||
tick = _jitter_counter
|
||||
|
||||
exponent = max(0, attempt - 1)
|
||||
if exponent >= 63 or base_delay <= 0:
|
||||
delay = max_delay
|
||||
else:
|
||||
delay = min(base_delay * (2 ** exponent), max_delay)
|
||||
|
||||
# Seed from time + counter for decorrelation even with coarse clocks.
|
||||
seed = (time.time_ns() ^ (tick * 0x9E3779B9)) & 0xFFFFFFFF
|
||||
rng = random.Random(seed)
|
||||
jitter = rng.uniform(0, jitter_ratio * delay)
|
||||
|
||||
return delay + jitter
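# Illustrative usage (not part of the diff): a retry loop that sleeps on a
# jittered schedule. The call_provider callable and the helper name are
# placeholders, not part of this module.
def _call_with_retries_sketch(call_provider, max_attempts: int = 5):
    for attempt in range(1, max_attempts + 1):
        try:
            return call_provider()
        except Exception:
            if attempt == max_attempts:
                raise
            time.sleep(jittered_backoff(attempt))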
|
||||
@@ -1,836 +0,0 @@
|
||||
"""
|
||||
Shell-script hooks bridge.
|
||||
|
||||
Reads the ``hooks:`` block from ``cli-config.yaml``, prompts the user for
|
||||
consent on first use of each ``(event, command)`` pair, and registers
|
||||
callbacks on the existing plugin hook manager so every existing
|
||||
``invoke_hook()`` site dispatches to the configured shell scripts — with
|
||||
zero changes to call sites.
|
||||
|
||||
Design notes
|
||||
------------
|
||||
* Python plugins and shell hooks compose naturally: both flow through
|
||||
:func:`hermes_cli.plugins.invoke_hook` and its aggregators. Python
|
||||
plugins are registered first (via ``discover_and_load()``) so their
|
||||
block decisions win ties over shell-hook blocks.
|
||||
* Subprocess execution uses ``shlex.split(os.path.expanduser(command))``
|
||||
with ``shell=False`` — no shell injection footguns. Users that need
|
||||
pipes/redirection wrap their logic in a script.
|
||||
* First-use consent is gated by the allowlist under
|
||||
``~/.hermes/shell-hooks-allowlist.json``. Non-TTY callers must pass
|
||||
``accept_hooks=True`` (resolved from ``--accept-hooks``,
|
||||
``HERMES_ACCEPT_HOOKS``, or ``hooks_auto_accept: true`` in config)
|
||||
for registration to succeed without a prompt.
|
||||
* Registration is idempotent — safe to invoke from both the CLI entry
|
||||
point (``hermes_cli/main.py``) and the gateway entry point
|
||||
(``gateway/run.py``).
|
||||
|
||||
Wire protocol
|
||||
-------------
|
||||
**stdin** (JSON, piped to the script)::
|
||||
|
||||
{
|
||||
"hook_event_name": "pre_tool_call",
|
||||
"tool_name": "terminal",
|
||||
"tool_input": {"command": "rm -rf /"},
|
||||
"session_id": "sess_abc123",
|
||||
"cwd": "/home/user/project",
|
||||
"extra": {...} # event-specific kwargs
|
||||
}
|
||||
|
||||
**stdout** (JSON, optional — anything else is ignored)::
|
||||
|
||||
# Block a pre_tool_call (either shape accepted; normalised internally):
|
||||
{"decision": "block", "reason": "Forbidden command"} # Claude-Code-style
|
||||
{"action": "block", "message": "Forbidden command"} # Hermes-canonical
|
||||
|
||||
# Inject context for pre_llm_call:
|
||||
{"context": "Today is Friday"}
|
||||
|
||||
# Silent no-op:
|
||||
<empty or any non-matching JSON object>
|
||||
"""
|
||||
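# Illustrative sketch (not part of the diff): what a minimal pre_tool_call hook
# script could look like on the other side of the wire protocol documented in
# the module docstring above. The body would normally live in its own
# executable file; it is wrapped in a function here purely for illustration,
# and every name besides the documented payload keys is an assumption.
def _example_pre_tool_call_hook_main() -> None:
    import json
    import sys

    payload = json.load(sys.stdin)
    tool_input = payload.get("tool_input") or {}
    command = str(tool_input.get("command", ""))
    if payload.get("hook_event_name") == "pre_tool_call" and "rm -rf" in command:
        json.dump({"decision": "block", "reason": "rm -rf is not allowed"}, sys.stdout)
    # Printing nothing (or any non-matching JSON) is treated as a silent no-op.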
|
||||
from __future__ import annotations
|
||||
|
||||
import difflib
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import threading
|
||||
import time
|
||||
from contextlib import contextmanager
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, Dict, Iterator, List, Optional, Set, Tuple
|
||||
|
||||
try:
|
||||
import fcntl # POSIX only; Windows falls back to best-effort without flock.
|
||||
except ImportError: # pragma: no cover
|
||||
fcntl = None # type: ignore[assignment]
|
||||
|
||||
from hermes_constants import get_hermes_home
|
||||
from utils import atomic_replace
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DEFAULT_TIMEOUT_SECONDS = 60
|
||||
MAX_TIMEOUT_SECONDS = 300
|
||||
ALLOWLIST_FILENAME = "shell-hooks-allowlist.json"
|
||||
|
||||
# (event, matcher, command) triples that have been wired to the plugin
|
||||
# manager in the current process. Matcher is part of the key because
|
||||
# the same script can legitimately register for different matchers under
|
||||
# the same event (e.g. one entry per tool the user wants to gate).
|
||||
# Second registration attempts for the exact same triple become no-ops
|
||||
# so the CLI and gateway can both call register_from_config() safely.
|
||||
_registered: Set[Tuple[str, Optional[str], str]] = set()
|
||||
_registered_lock = threading.Lock()
|
||||
|
||||
# Intra-process lock for allowlist read-modify-write on platforms that
|
||||
# lack ``fcntl`` (non-POSIX). Kept separate from ``_registered_lock``
|
||||
# because ``register_from_config`` already holds ``_registered_lock`` when
|
||||
# it triggers ``_record_approval`` — reusing it here would self-deadlock
|
||||
# (``threading.Lock`` is non-reentrant). POSIX callers use the sibling
|
||||
# ``.lock`` file via ``fcntl.flock`` and bypass this.
|
||||
_allowlist_write_lock = threading.Lock()
|
||||
|
||||
|
||||
@dataclass
|
||||
class ShellHookSpec:
|
||||
"""Parsed and validated representation of a single ``hooks:`` entry."""
|
||||
|
||||
event: str
|
||||
command: str
|
||||
matcher: Optional[str] = None
|
||||
timeout: int = DEFAULT_TIMEOUT_SECONDS
|
||||
compiled_matcher: Optional[re.Pattern] = field(default=None, repr=False)
|
||||
|
||||
def __post_init__(self) -> None:
|
||||
# Strip whitespace introduced by YAML quirks (e.g. multi-line string
|
||||
# folding) — a matcher of " terminal" would otherwise silently fail
|
||||
# to match "terminal" without any diagnostic.
|
||||
if isinstance(self.matcher, str):
|
||||
stripped = self.matcher.strip()
|
||||
self.matcher = stripped if stripped else None
|
||||
if self.matcher:
|
||||
try:
|
||||
self.compiled_matcher = re.compile(self.matcher)
|
||||
except re.error as exc:
|
||||
logger.warning(
|
||||
"shell hook matcher %r is invalid (%s) — treating as "
|
||||
"literal equality", self.matcher, exc,
|
||||
)
|
||||
self.compiled_matcher = None
|
||||
|
||||
def matches_tool(self, tool_name: Optional[str]) -> bool:
|
||||
if not self.matcher:
|
||||
return True
|
||||
if tool_name is None:
|
||||
return False
|
||||
if self.compiled_matcher is not None:
|
||||
return self.compiled_matcher.fullmatch(tool_name) is not None
|
||||
# compiled_matcher is None only when the regex failed to compile,
|
||||
# in which case we already warned and fall back to literal equality.
|
||||
return tool_name == self.matcher
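# Illustrative usage (not part of the diff): matchers are full-match regexes,
# so "browser_.*" gates every browser tool while leaving "terminal" alone.
# The command path below is a placeholder.
_spec_example = ShellHookSpec(
    event="pre_tool_call", command="./audit.sh", matcher="browser_.*"
)
assert _spec_example.matches_tool("browser_click") is True
assert _spec_example.matches_tool("terminal") is False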
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def register_from_config(
|
||||
cfg: Optional[Dict[str, Any]],
|
||||
*,
|
||||
accept_hooks: bool = False,
|
||||
) -> List[ShellHookSpec]:
|
||||
"""Register every configured shell hook on the plugin manager.
|
||||
|
||||
``cfg`` is the full parsed config dict (``hermes_cli.config.load_config``
|
||||
output). The ``hooks:`` key is read out of it. Missing, empty, or
|
||||
non-dict ``hooks`` is treated as zero configured hooks.
|
||||
|
||||
``accept_hooks=True`` skips the TTY consent prompt — the caller is
|
||||
promising that the user has opted in via a flag, env var, or config
|
||||
setting. ``HERMES_ACCEPT_HOOKS=1`` and ``hooks_auto_accept: true`` are
|
||||
also honored inside this function so either CLI or gateway call sites
|
||||
pick them up.
|
||||
|
||||
Returns the list of :class:`ShellHookSpec` entries that ended up wired
|
||||
up on the plugin manager. Skipped entries (unknown events, malformed,
|
||||
not allowlisted, already registered) are logged but not returned.
|
||||
"""
|
||||
if not isinstance(cfg, dict):
|
||||
return []
|
||||
|
||||
effective_accept = _resolve_effective_accept(cfg, accept_hooks)
|
||||
|
||||
specs = _parse_hooks_block(cfg.get("hooks"))
|
||||
if not specs:
|
||||
return []
|
||||
|
||||
registered: List[ShellHookSpec] = []
|
||||
|
||||
# Import lazily — avoids circular imports at module-load time.
|
||||
from hermes_cli.plugins import get_plugin_manager
|
||||
|
||||
manager = get_plugin_manager()
|
||||
|
||||
# Idempotence + allowlist read happen under the lock; the TTY
|
||||
# prompt runs outside so other threads aren't parked on a blocking
|
||||
# input(). Mutation re-takes the lock with a defensive idempotence
|
||||
# re-check in case two callers ever race through the prompt.
|
||||
for spec in specs:
|
||||
key = (spec.event, spec.matcher, spec.command)
|
||||
with _registered_lock:
|
||||
if key in _registered:
|
||||
continue
|
||||
already_allowlisted = _is_allowlisted(spec.event, spec.command)
|
||||
|
||||
if not already_allowlisted:
|
||||
if not _prompt_and_record(
|
||||
spec.event, spec.command, accept_hooks=effective_accept,
|
||||
):
|
||||
logger.warning(
|
||||
"shell hook for %s (%s) not allowlisted — skipped. "
|
||||
"Use --accept-hooks / HERMES_ACCEPT_HOOKS=1 / "
|
||||
"hooks_auto_accept: true, or approve at the TTY "
|
||||
"prompt next run.",
|
||||
spec.event, spec.command,
|
||||
)
|
||||
continue
|
||||
|
||||
with _registered_lock:
|
||||
if key in _registered:
|
||||
continue
|
||||
manager._hooks.setdefault(spec.event, []).append(_make_callback(spec))
|
||||
_registered.add(key)
|
||||
registered.append(spec)
|
||||
logger.info(
|
||||
"shell hook registered: %s -> %s (matcher=%s, timeout=%ds)",
|
||||
spec.event, spec.command, spec.matcher, spec.timeout,
|
||||
)
|
||||
|
||||
return registered
|
||||
|
||||
|
||||
def iter_configured_hooks(cfg: Optional[Dict[str, Any]]) -> List[ShellHookSpec]:
|
||||
"""Return the parsed ``ShellHookSpec`` entries from config without
|
||||
registering anything. Used by ``hermes hooks list`` and ``doctor``."""
|
||||
if not isinstance(cfg, dict):
|
||||
return []
|
||||
return _parse_hooks_block(cfg.get("hooks"))
|
||||
|
||||
|
||||
def reset_for_tests() -> None:
|
||||
"""Clear the idempotence set. Test-only helper."""
|
||||
with _registered_lock:
|
||||
_registered.clear()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Config parsing
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _parse_hooks_block(hooks_cfg: Any) -> List[ShellHookSpec]:
|
||||
"""Normalise the ``hooks:`` dict into a flat list of ``ShellHookSpec``.
|
||||
|
||||
Malformed entries warn-and-skip — we never raise from config parsing
|
||||
because a broken hook must not crash the agent.
|
||||
"""
|
||||
from hermes_cli.plugins import VALID_HOOKS
|
||||
|
||||
if not isinstance(hooks_cfg, dict):
|
||||
return []
|
||||
|
||||
specs: List[ShellHookSpec] = []
|
||||
|
||||
for event_name, entries in hooks_cfg.items():
|
||||
if event_name not in VALID_HOOKS:
|
||||
suggestion = difflib.get_close_matches(
|
||||
str(event_name), VALID_HOOKS, n=1, cutoff=0.6,
|
||||
)
|
||||
if suggestion:
|
||||
logger.warning(
|
||||
"unknown hook event %r in hooks: config — did you mean %r?",
|
||||
event_name, suggestion[0],
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
"unknown hook event %r in hooks: config (valid: %s)",
|
||||
event_name, ", ".join(sorted(VALID_HOOKS)),
|
||||
)
|
||||
continue
|
||||
|
||||
if entries is None:
|
||||
continue
|
||||
|
||||
if not isinstance(entries, list):
|
||||
logger.warning(
|
||||
"hooks.%s must be a list of hook definitions; got %s",
|
||||
event_name, type(entries).__name__,
|
||||
)
|
||||
continue
|
||||
|
||||
for i, raw in enumerate(entries):
|
||||
spec = _parse_single_entry(event_name, i, raw)
|
||||
if spec is not None:
|
||||
specs.append(spec)
|
||||
|
||||
return specs
|
||||
|
||||
|
||||
def _parse_single_entry(
|
||||
event: str, index: int, raw: Any,
|
||||
) -> Optional[ShellHookSpec]:
|
||||
if not isinstance(raw, dict):
|
||||
logger.warning(
|
||||
"hooks.%s[%d] must be a mapping with a 'command' key; got %s",
|
||||
event, index, type(raw).__name__,
|
||||
)
|
||||
return None
|
||||
|
||||
command = raw.get("command")
|
||||
if not isinstance(command, str) or not command.strip():
|
||||
logger.warning(
|
||||
"hooks.%s[%d] is missing a non-empty 'command' field",
|
||||
event, index,
|
||||
)
|
||||
return None
|
||||
|
||||
matcher = raw.get("matcher")
|
||||
if matcher is not None and not isinstance(matcher, str):
|
||||
logger.warning(
|
||||
"hooks.%s[%d].matcher must be a string regex; ignoring",
|
||||
event, index,
|
||||
)
|
||||
matcher = None
|
||||
|
||||
if matcher is not None and event not in ("pre_tool_call", "post_tool_call"):
|
||||
logger.warning(
|
||||
"hooks.%s[%d].matcher=%r will be ignored at runtime — the "
|
||||
"matcher field is only honored for pre_tool_call / "
|
||||
"post_tool_call. The hook will fire on every %s event.",
|
||||
event, index, matcher, event,
|
||||
)
|
||||
matcher = None
|
||||
|
||||
timeout_raw = raw.get("timeout", DEFAULT_TIMEOUT_SECONDS)
|
||||
try:
|
||||
timeout = int(timeout_raw)
|
||||
except (TypeError, ValueError):
|
||||
logger.warning(
|
||||
"hooks.%s[%d].timeout must be an int (got %r); using default %ds",
|
||||
event, index, timeout_raw, DEFAULT_TIMEOUT_SECONDS,
|
||||
)
|
||||
timeout = DEFAULT_TIMEOUT_SECONDS
|
||||
|
||||
if timeout < 1:
|
||||
logger.warning(
|
||||
"hooks.%s[%d].timeout must be >=1; using default %ds",
|
||||
event, index, DEFAULT_TIMEOUT_SECONDS,
|
||||
)
|
||||
timeout = DEFAULT_TIMEOUT_SECONDS
|
||||
|
||||
if timeout > MAX_TIMEOUT_SECONDS:
|
||||
logger.warning(
|
||||
"hooks.%s[%d].timeout=%ds exceeds max %ds; clamping",
|
||||
event, index, timeout, MAX_TIMEOUT_SECONDS,
|
||||
)
|
||||
timeout = MAX_TIMEOUT_SECONDS
|
||||
|
||||
return ShellHookSpec(
|
||||
event=event,
|
||||
command=command.strip(),
|
||||
matcher=matcher,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Subprocess callback
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_TOP_LEVEL_PAYLOAD_KEYS = {"tool_name", "args", "session_id", "parent_session_id"}
|
||||
|
||||
|
||||
def _spawn(spec: ShellHookSpec, stdin_json: str) -> Dict[str, Any]:
|
||||
"""Run ``spec.command`` as a subprocess with ``stdin_json`` on stdin.
|
||||
|
||||
Returns a diagnostic dict with the same keys for every outcome
|
||||
(``returncode``, ``stdout``, ``stderr``, ``timed_out``,
|
||||
``elapsed_seconds``, ``error``). This is the single place the
|
||||
subprocess is actually invoked — both the live callback path
|
||||
(:func:`_make_callback`) and the CLI test helper (:func:`run_once`)
|
||||
go through it.
|
||||
"""
|
||||
result: Dict[str, Any] = {
|
||||
"returncode": None,
|
||||
"stdout": "",
|
||||
"stderr": "",
|
||||
"timed_out": False,
|
||||
"elapsed_seconds": 0.0,
|
||||
"error": None,
|
||||
}
|
||||
try:
|
||||
argv = shlex.split(os.path.expanduser(spec.command))
|
||||
except ValueError as exc:
|
||||
result["error"] = f"command {spec.command!r} cannot be parsed: {exc}"
|
||||
return result
|
||||
if not argv:
|
||||
result["error"] = "empty command"
|
||||
return result
|
||||
|
||||
t0 = time.monotonic()
|
||||
try:
|
||||
proc = subprocess.run(
|
||||
argv,
|
||||
input=stdin_json,
|
||||
capture_output=True,
|
||||
timeout=spec.timeout,
|
||||
text=True,
|
||||
shell=False,
|
||||
)
|
||||
except subprocess.TimeoutExpired:
|
||||
result["timed_out"] = True
|
||||
result["elapsed_seconds"] = round(time.monotonic() - t0, 3)
|
||||
return result
|
||||
except FileNotFoundError:
|
||||
result["error"] = "command not found"
|
||||
return result
|
||||
except PermissionError:
|
||||
result["error"] = "command not executable"
|
||||
return result
|
||||
except Exception as exc: # pragma: no cover — defensive
|
||||
result["error"] = str(exc)
|
||||
return result
|
||||
|
||||
result["returncode"] = proc.returncode
|
||||
result["stdout"] = proc.stdout or ""
|
||||
result["stderr"] = proc.stderr or ""
|
||||
result["elapsed_seconds"] = round(time.monotonic() - t0, 3)
|
||||
return result
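# Minimal sketch of the diagnostic contract: `cat` simply echoes the stdin JSON
# back, so a successful run has returncode 0, no timeout, and no error.
echo_spec = ShellHookSpec(event="pre_llm_call", command="cat", matcher=None, timeout=5)
diag = _spawn(echo_spec, '{"hook_event_name": "pre_llm_call"}')
assert diag["returncode"] == 0 and not diag["timed_out"] and diag["error"] is None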
|
||||
|
||||
|
||||
def _make_callback(spec: ShellHookSpec) -> Callable[..., Optional[Dict[str, Any]]]:
|
||||
"""Build the closure that ``invoke_hook()`` will call per firing."""
|
||||
|
||||
def _callback(**kwargs: Any) -> Optional[Dict[str, Any]]:
|
||||
# Matcher gate — only meaningful for tool-scoped events.
|
||||
if spec.event in ("pre_tool_call", "post_tool_call"):
|
||||
if not spec.matches_tool(kwargs.get("tool_name")):
|
||||
return None
|
||||
|
||||
r = _spawn(spec, _serialize_payload(spec.event, kwargs))
|
||||
|
||||
if r["error"]:
|
||||
logger.warning(
|
||||
"shell hook failed (event=%s command=%s): %s",
|
||||
spec.event, spec.command, r["error"],
|
||||
)
|
||||
return None
|
||||
if r["timed_out"]:
|
||||
logger.warning(
|
||||
"shell hook timed out after %.2fs (event=%s command=%s)",
|
||||
r["elapsed_seconds"], spec.event, spec.command,
|
||||
)
|
||||
return None
|
||||
|
||||
stderr = r["stderr"].strip()
|
||||
if stderr:
|
||||
logger.debug(
|
||||
"shell hook stderr (event=%s command=%s): %s",
|
||||
spec.event, spec.command, stderr[:400],
|
||||
)
|
||||
# Non-zero exits: log but still parse stdout so scripts that
|
||||
# signal failure via exit code can also return a block directive.
|
||||
if r["returncode"] != 0:
|
||||
logger.warning(
|
||||
"shell hook exited %d (event=%s command=%s); stderr=%s",
|
||||
r["returncode"], spec.event, spec.command, stderr[:400],
|
||||
)
|
||||
return _parse_response(spec.event, r["stdout"])
|
||||
|
||||
_callback.__name__ = f"shell_hook[{spec.event}:{spec.command}]"
|
||||
_callback.__qualname__ = _callback.__name__
|
||||
return _callback
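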
|
||||
|
||||
|
||||
def _serialize_payload(event: str, kwargs: Dict[str, Any]) -> str:
|
||||
"""Render the stdin JSON payload. Unserialisable values are
|
||||
stringified via ``default=str`` rather than dropped."""
|
||||
extras = {k: v for k, v in kwargs.items() if k not in _TOP_LEVEL_PAYLOAD_KEYS}
|
||||
try:
|
||||
cwd = str(Path.cwd())
|
||||
except OSError:
|
||||
cwd = ""
|
||||
payload = {
|
||||
"hook_event_name": event,
|
||||
"tool_name": kwargs.get("tool_name"),
|
||||
"tool_input": kwargs.get("args") if isinstance(kwargs.get("args"), dict) else None,
|
||||
"session_id": kwargs.get("session_id") or kwargs.get("parent_session_id") or "",
|
||||
"cwd": cwd,
|
||||
"extra": extras,
|
||||
}
|
||||
return json.dumps(payload, ensure_ascii=False, default=str)
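# Illustrative stdin payload for a pre_tool_call firing (all values are examples):
example_payload = {
    "hook_event_name": "pre_tool_call",
    "tool_name": "terminal",
    "tool_input": {"command": "ls -la"},
    "session_id": "sess-123",
    "cwd": "/home/user/project",
    "extra": {},
}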
|
||||
|
||||
|
||||
def _parse_response(event: str, stdout: str) -> Optional[Dict[str, Any]]:
|
||||
"""Translate stdout JSON into a Hermes wire-shape dict.
|
||||
|
||||
For ``pre_tool_call`` the Claude-Code-style ``{"decision": "block",
|
||||
"reason": "..."}`` payload is translated into the canonical Hermes
|
||||
``{"action": "block", "message": "..."}`` shape expected by
|
||||
:func:`hermes_cli.plugins.get_pre_tool_call_block_message`. This is
|
||||
the single most important correctness invariant in this module —
|
||||
skipping the translation silently breaks every ``pre_tool_call``
|
||||
block directive.
|
||||
|
||||
For ``pre_llm_call``, ``{"context": "..."}`` is passed through
|
||||
unchanged to match the existing plugin-hook contract.
|
||||
|
||||
Anything else returns ``None``.
|
||||
"""
|
||||
stdout = (stdout or "").strip()
|
||||
if not stdout:
|
||||
return None
|
||||
|
||||
try:
|
||||
data = json.loads(stdout)
|
||||
except json.JSONDecodeError:
|
||||
logger.warning(
|
||||
"shell hook stdout was not valid JSON (event=%s): %s",
|
||||
event, stdout[:200],
|
||||
)
|
||||
return None
|
||||
|
||||
if not isinstance(data, dict):
|
||||
return None
|
||||
|
||||
if event == "pre_tool_call":
|
||||
if data.get("action") == "block":
|
||||
message = data.get("message") or data.get("reason") or ""
|
||||
if isinstance(message, str) and message:
|
||||
return {"action": "block", "message": message}
|
||||
if data.get("decision") == "block":
|
||||
message = data.get("reason") or data.get("message") or ""
|
||||
if isinstance(message, str) and message:
|
||||
return {"action": "block", "message": message}
|
||||
return None
|
||||
|
||||
context = data.get("context")
|
||||
if isinstance(context, str) and context.strip():
|
||||
return {"context": context}
|
||||
|
||||
return None
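# Illustrative standalone pre_tool_call hook script (the path it would be saved
# under, e.g. ~/.hermes/hooks/block_rm.py, is just an example). It reads the
# payload produced by _serialize_payload and may emit either the Claude-Code
# style {"decision": "block", "reason": ...} or the canonical Hermes
# {"action": "block", "message": ...}; _parse_response accepts both.
import json
import sys

payload = json.load(sys.stdin)
command = (payload.get("tool_input") or {}).get("command", "")
if payload.get("tool_name") == "terminal" and "rm -rf" in command:
    print(json.dumps({"decision": "block", "reason": "rm -rf is not allowed"}))
sys.exit(0)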
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Allowlist / consent
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def allowlist_path() -> Path:
|
||||
"""Path to the per-user shell-hook allowlist file."""
|
||||
return get_hermes_home() / ALLOWLIST_FILENAME
|
||||
|
||||
|
||||
def load_allowlist() -> Dict[str, Any]:
|
||||
"""Return the parsed allowlist, or an empty skeleton if absent."""
|
||||
try:
|
||||
raw = json.loads(allowlist_path().read_text())
|
||||
except (FileNotFoundError, json.JSONDecodeError, OSError):
|
||||
return {"approvals": []}
|
||||
if not isinstance(raw, dict):
|
||||
return {"approvals": []}
|
||||
approvals = raw.get("approvals")
|
||||
if not isinstance(approvals, list):
|
||||
raw["approvals"] = []
|
||||
return raw
|
||||
|
||||
|
||||
def save_allowlist(data: Dict[str, Any]) -> None:
|
||||
"""Atomically persist the allowlist via per-process ``mkstemp`` +
|
||||
``os.replace``. Cross-process read-modify-write races are handled
|
||||
by :func:`_locked_update_approvals` (``fcntl.flock``). On OSError
|
||||
the failure is logged; the in-process hook still registers but
|
||||
the approval won't survive across runs."""
|
||||
p = allowlist_path()
|
||||
try:
|
||||
p.parent.mkdir(parents=True, exist_ok=True)
|
||||
fd, tmp_path = tempfile.mkstemp(
|
||||
prefix=f"{p.name}.", suffix=".tmp", dir=str(p.parent),
|
||||
)
|
||||
try:
|
||||
with os.fdopen(fd, "w") as fh:
|
||||
fh.write(json.dumps(data, indent=2, sort_keys=True))
|
||||
atomic_replace(tmp_path, p)
|
||||
except Exception:
|
||||
try:
|
||||
os.unlink(tmp_path)
|
||||
except OSError:
|
||||
pass
|
||||
raise
|
||||
except OSError as exc:
|
||||
logger.warning(
|
||||
"Failed to persist shell hook allowlist to %s: %s. "
|
||||
"The approval is in-memory for this run, but the next "
|
||||
"startup will re-prompt (or skip registration on non-TTY "
|
||||
"runs without --accept-hooks / HERMES_ACCEPT_HOOKS).",
|
||||
p, exc,
|
||||
)
|
||||
|
||||
|
||||
def _is_allowlisted(event: str, command: str) -> bool:
|
||||
data = load_allowlist()
|
||||
return any(
|
||||
isinstance(e, dict)
|
||||
and e.get("event") == event
|
||||
and e.get("command") == command
|
||||
for e in data.get("approvals", [])
|
||||
)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _locked_update_approvals() -> Iterator[Dict[str, Any]]:
|
||||
"""Serialise read-modify-write on the allowlist across processes.
|
||||
|
||||
Holds an exclusive ``flock`` on a sibling lock file for the duration
|
||||
of the update so concurrent ``_record_approval``/``revoke`` callers
|
||||
cannot clobber each other's changes (the race Codex reproduced with
|
||||
20–50 simultaneous writers). Falls back to an in-process lock on
|
||||
platforms without ``fcntl``.
|
||||
"""
|
||||
p = allowlist_path()
|
||||
p.parent.mkdir(parents=True, exist_ok=True)
|
||||
lock_path = p.with_suffix(p.suffix + ".lock")
|
||||
|
||||
if fcntl is None: # pragma: no cover — non-POSIX fallback
|
||||
with _allowlist_write_lock:
|
||||
data = load_allowlist()
|
||||
yield data
|
||||
save_allowlist(data)
|
||||
return
|
||||
|
||||
with open(lock_path, "a+") as lock_fh:
|
||||
fcntl.flock(lock_fh.fileno(), fcntl.LOCK_EX)
|
||||
try:
|
||||
data = load_allowlist()
|
||||
yield data
|
||||
save_allowlist(data)
|
||||
finally:
|
||||
fcntl.flock(lock_fh.fileno(), fcntl.LOCK_UN)
|
||||
|
||||
|
||||
def _prompt_and_record(
|
||||
event: str, command: str, *, accept_hooks: bool,
|
||||
) -> bool:
|
||||
"""Decide whether to approve an unseen ``(event, command)`` pair.
|
||||
Returns ``True`` iff the approval was granted and recorded.
|
||||
"""
|
||||
if accept_hooks:
|
||||
_record_approval(event, command)
|
||||
logger.info(
|
||||
"shell hook auto-approved via --accept-hooks / env / config: "
|
||||
"%s -> %s", event, command,
|
||||
)
|
||||
return True
|
||||
|
||||
if not sys.stdin.isatty():
|
||||
return False
|
||||
|
||||
print(
|
||||
f"\n⚠ Hermes is about to register a shell hook that will run a\n"
|
||||
f" command on your behalf.\n\n"
|
||||
f" Event: {event}\n"
|
||||
f" Command: {command}\n\n"
|
||||
f" Commands run with your full user credentials. Only approve\n"
|
||||
f" commands you trust."
|
||||
)
|
||||
try:
|
||||
answer = input("Allow this hook to run? [y/N]: ").strip().lower()
|
||||
except (EOFError, KeyboardInterrupt):
|
||||
print() # keep the terminal tidy after ^C
|
||||
return False
|
||||
|
||||
if answer in ("y", "yes"):
|
||||
_record_approval(event, command)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def _record_approval(event: str, command: str) -> None:
|
||||
entry = {
|
||||
"event": event,
|
||||
"command": command,
|
||||
"approved_at": _utc_now_iso(),
|
||||
"script_mtime_at_approval": script_mtime_iso(command),
|
||||
}
|
||||
with _locked_update_approvals() as data:
|
||||
data["approvals"] = [
|
||||
e for e in data.get("approvals", [])
|
||||
if not (
|
||||
isinstance(e, dict)
|
||||
and e.get("event") == event
|
||||
and e.get("command") == command
|
||||
)
|
||||
] + [entry]
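# Illustrative allowlist file contents after one approval. Keys appear sorted
# because save_allowlist() writes with sort_keys=True; values are examples.
example_allowlist = {
    "approvals": [
        {
            "approved_at": "2025-01-15T09:30:00Z",
            "command": "~/.hermes/hooks/guard.sh",
            "event": "pre_tool_call",
            "script_mtime_at_approval": "2025-01-14T18:02:11Z",
        }
    ]
}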
|
||||
|
||||
|
||||
def _utc_now_iso() -> str:
|
||||
return datetime.now(tz=timezone.utc).isoformat().replace("+00:00", "Z")
|
||||
|
||||
|
||||
def revoke(command: str) -> int:
|
||||
"""Remove every allowlist entry matching ``command``.
|
||||
|
||||
Returns the number of entries removed. Does not unregister any
|
||||
callbacks that are already live on the plugin manager in the current
|
||||
process — restart the CLI / gateway to drop them.
|
||||
"""
|
||||
with _locked_update_approvals() as data:
|
||||
before = len(data.get("approvals", []))
|
||||
data["approvals"] = [
|
||||
e for e in data.get("approvals", [])
|
||||
if not (isinstance(e, dict) and e.get("command") == command)
|
||||
]
|
||||
after = len(data["approvals"])
|
||||
return before - after
|
||||
|
||||
|
||||
_SCRIPT_EXTENSIONS: Tuple[str, ...] = (
|
||||
".sh", ".bash", ".zsh", ".fish",
|
||||
".py", ".pyw",
|
||||
".rb", ".pl", ".lua",
|
||||
".js", ".mjs", ".cjs", ".ts",
|
||||
)
|
||||
|
||||
|
||||
def _command_script_path(command: str) -> str:
|
||||
"""Return the script path from ``command`` for doctor / drift checks.
|
||||
|
||||
Prefers a token ending in a known script extension, then a token
|
||||
containing ``/`` or leading ``~``, then the first token. Handles
|
||||
``python3 /path/hook.py``, ``/usr/bin/env bash hook.sh``, and the
|
||||
common bare-path form.
|
||||
"""
|
||||
try:
|
||||
parts = shlex.split(command)
|
||||
except ValueError:
|
||||
return command
|
||||
if not parts:
|
||||
return command
|
||||
for part in parts:
|
||||
if part.lower().endswith(_SCRIPT_EXTENSIONS):
|
||||
return part
|
||||
for part in parts:
|
||||
if "/" in part or part.startswith("~"):
|
||||
return part
|
||||
return parts[0]
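# Illustrative resolutions (paths are examples):
_command_script_path("python3 /opt/hooks/audit.py")        # -> "/opt/hooks/audit.py"
_command_script_path("/usr/bin/env bash hook.sh")          # -> "hook.sh"
_command_script_path("~/.hermes/hooks/guard.sh --strict")  # -> "~/.hermes/hooks/guard.sh"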
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers for accept-hooks resolution
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _resolve_effective_accept(
|
||||
cfg: Dict[str, Any], accept_hooks_arg: bool,
|
||||
) -> bool:
|
||||
"""Combine all three opt-in channels into a single boolean.
|
||||
|
||||
Precedence (any truthy source flips us on):
|
||||
1. ``--accept-hooks`` flag (CLI) / explicit argument
|
||||
2. ``HERMES_ACCEPT_HOOKS`` env var
|
||||
3. ``hooks_auto_accept: true`` in ``cli-config.yaml``
|
||||
"""
|
||||
if accept_hooks_arg:
|
||||
return True
|
||||
env = os.environ.get("HERMES_ACCEPT_HOOKS", "").strip().lower()
|
||||
if env in ("1", "true", "yes", "on"):
|
||||
return True
|
||||
cfg_val = cfg.get("hooks_auto_accept", False)
|
||||
if isinstance(cfg_val, bool):
|
||||
return cfg_val
|
||||
if isinstance(cfg_val, str):
|
||||
return cfg_val.strip().lower() in ("1", "true", "yes", "on")
|
||||
return False
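# Illustrative: any one truthy channel enables auto-accept.
assert _resolve_effective_accept({}, True)                              # --accept-hooks flag
os.environ["HERMES_ACCEPT_HOOKS"] = "yes"
assert _resolve_effective_accept({}, False)                             # env var
del os.environ["HERMES_ACCEPT_HOOKS"]
assert _resolve_effective_accept({"hooks_auto_accept": "true"}, False)  # cli-config.yaml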
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Introspection (used by `hermes hooks` CLI)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def allowlist_entry_for(event: str, command: str) -> Optional[Dict[str, Any]]:
|
||||
"""Return the allowlist record for this pair, if any."""
|
||||
for e in load_allowlist().get("approvals", []):
|
||||
if (
|
||||
isinstance(e, dict)
|
||||
and e.get("event") == event
|
||||
and e.get("command") == command
|
||||
):
|
||||
return e
|
||||
return None
|
||||
|
||||
|
||||
def script_mtime_iso(command: str) -> Optional[str]:
|
||||
"""ISO-8601 mtime of the resolved script path, or ``None`` if the
|
||||
script is missing."""
|
||||
path = _command_script_path(command)
|
||||
if not path:
|
||||
return None
|
||||
try:
|
||||
expanded = os.path.expanduser(path)
|
||||
return datetime.fromtimestamp(
|
||||
os.path.getmtime(expanded), tz=timezone.utc,
|
||||
).isoformat().replace("+00:00", "Z")
|
||||
except OSError:
|
||||
return None
|
||||
|
||||
|
||||
def script_is_executable(command: str) -> bool:
|
||||
"""Return ``True`` iff ``command`` is runnable as configured.
|
||||
|
||||
For a bare invocation (``/path/hook.sh``) the script itself must be
|
||||
executable. For interpreter-prefixed commands (``python3
|
||||
/path/hook.py``, ``/usr/bin/env bash hook.sh``) the script just has
|
||||
to be readable — the interpreter doesn't care about the ``X_OK``
|
||||
bit. Mirrors what ``_spawn`` would actually do at runtime."""
|
||||
path = _command_script_path(command)
|
||||
if not path:
|
||||
return False
|
||||
expanded = os.path.expanduser(path)
|
||||
if not os.path.isfile(expanded):
|
||||
return False
|
||||
try:
|
||||
argv = shlex.split(command)
|
||||
except ValueError:
|
||||
return False
|
||||
is_bare_invocation = bool(argv) and argv[0] == path
|
||||
required = os.X_OK if is_bare_invocation else os.R_OK
|
||||
return os.access(expanded, required)
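# Illustrative (paths are examples): a bare invocation needs the execute bit,
# while an interpreter-prefixed command only needs the script to be readable.
script_is_executable("/opt/hooks/guard.sh")          # True only if guard.sh is executable
script_is_executable("python3 /opt/hooks/audit.py")  # True if audit.py exists and is readable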
|
||||
|
||||
|
||||
def run_once(
|
||||
spec: ShellHookSpec, kwargs: Dict[str, Any],
|
||||
) -> Dict[str, Any]:
|
||||
"""Fire a single shell-hook invocation with a synthetic payload.
|
||||
Used by ``hermes hooks test`` and ``hermes hooks doctor``.
|
||||
|
||||
``kwargs`` is the same dict that :func:`hermes_cli.plugins.invoke_hook`
|
||||
would pass at runtime. It is routed through :func:`_serialize_payload`
|
||||
so the synthetic stdin exactly matches what a real hook firing would
|
||||
produce — otherwise scripts tested via ``hermes hooks test`` could
|
||||
diverge silently from production behaviour.
|
||||
|
||||
Returns the :func:`_spawn` diagnostic dict plus a ``parsed`` field
|
||||
holding the canonical Hermes-wire-shape response."""
|
||||
stdin_json = _serialize_payload(spec.event, kwargs)
|
||||
result = _spawn(spec, stdin_json)
|
||||
result["parsed"] = _parse_response(spec.event, result["stdout"])
|
||||
return result
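# Sketch of a doctor-style check (hook path and tool args are made up): fire
# the hook once with a synthetic payload and inspect the parsed directive.
test_spec = ShellHookSpec(
    event="pre_tool_call", command="~/.hermes/hooks/guard.sh", matcher=None, timeout=10,
)
outcome = run_once(test_spec, {"tool_name": "terminal", "args": {"command": "rm -rf /tmp/x"}})
print(outcome["returncode"], outcome["parsed"])  # e.g. 0 {'action': 'block', 'message': '...'}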
|
||||
@@ -1,54 +1,43 @@
|
||||
"""Shared slash command helpers for skills.
|
||||
"""Shared slash command helpers for skills and built-in prompt-style modes.
|
||||
|
||||
Shared between CLI (cli.py) and gateway (gateway/run.py) so both surfaces
|
||||
can invoke skills via /skill-name commands.
|
||||
can invoke skills via /skill-name commands and prompt-only built-ins like
|
||||
/plan.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
from hermes_constants import display_hermes_home
|
||||
from agent.skill_preprocessing import (
|
||||
expand_inline_shell as _expand_inline_shell,
|
||||
load_skills_config as _load_skills_config,
|
||||
substitute_template_vars as _substitute_template_vars,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_skill_commands: Dict[str, Dict[str, Any]] = {}
|
||||
_skill_commands_platform: Optional[str] = None
|
||||
# Patterns for sanitizing skill names into clean hyphen-separated slugs.
|
||||
_SKILL_INVALID_CHARS = re.compile(r"[^a-z0-9-]")
|
||||
_SKILL_MULTI_HYPHEN = re.compile(r"-{2,}")
|
||||
_PLAN_SLUG_RE = re.compile(r"[^a-z0-9]+")
|
||||
|
||||
|
||||
def _resolve_skill_commands_platform() -> Optional[str]:
|
||||
"""Return the current platform scope used for disabled-skill filtering.
|
||||
def build_plan_path(
|
||||
user_instruction: str = "",
|
||||
*,
|
||||
now: datetime | None = None,
|
||||
) -> Path:
|
||||
"""Return the default workspace-relative markdown path for a /plan invocation.
|
||||
|
||||
Used to detect when the active platform has shifted so
|
||||
:func:`get_skill_commands` can drop a stale cache that was populated
|
||||
for a different platform's ``skills.platform_disabled`` view (#14536).
|
||||
|
||||
Resolves from (in order) ``HERMES_PLATFORM`` env var and
|
||||
``HERMES_SESSION_PLATFORM`` from the gateway session context. Returns
|
||||
``None`` when no platform scope is active (e.g. classic CLI, RL
|
||||
rollouts, standalone scripts).
|
||||
Relative paths are intentional: file tools are task/backend-aware and resolve
|
||||
them against the active working directory for local, docker, ssh, modal,
|
||||
daytona, and similar terminal backends. That keeps the plan with the active
|
||||
workspace instead of the Hermes host's global home directory.
|
||||
"""
|
||||
try:
|
||||
from gateway.session_context import get_session_env
|
||||
slug_source = next(iter((user_instruction or "").strip().splitlines()), "")
|
||||
slug = _PLAN_SLUG_RE.sub("-", slug_source.lower()).strip("-")
|
||||
if slug:
|
||||
slug = "-".join(part for part in slug.split("-")[:8] if part)[:48].strip("-")
|
||||
slug = slug or "conversation-plan"
|
||||
timestamp = (now or datetime.now()).strftime("%Y-%m-%d_%H%M%S")
|
||||
return Path(".hermes") / "plans" / f"{timestamp}-{slug}.md"
|
||||
|
||||
resolved_platform = (
|
||||
os.getenv("HERMES_PLATFORM")
|
||||
or get_session_env("HERMES_SESSION_PLATFORM")
|
||||
)
|
||||
except Exception:
|
||||
resolved_platform = os.getenv("HERMES_PLATFORM")
|
||||
return resolved_platform or None
|
||||
|
||||
def _load_skill_payload(skill_identifier: str, task_id: str | None = None) -> tuple[dict[str, Any], Path | None, str] | None:
|
||||
"""Load a skill by name/path and return (loaded_payload, skill_dir, display_name)."""
|
||||
@@ -68,9 +57,7 @@ def _load_skill_payload(skill_identifier: str, task_id: str | None = None) -> tu
|
||||
else:
|
||||
normalized = raw_identifier.lstrip("/")
|
||||
|
||||
loaded_skill = json.loads(
|
||||
skill_view(normalized, task_id=task_id, preprocess=False)
|
||||
)
|
||||
loaded_skill = json.loads(skill_view(normalized, task_id=task_id))
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
@@ -80,14 +67,7 @@ def _load_skill_payload(skill_identifier: str, task_id: str | None = None) -> tu
|
||||
skill_name = str(loaded_skill.get("name") or normalized)
|
||||
skill_path = str(loaded_skill.get("path") or "")
|
||||
skill_dir = None
|
||||
# Prefer the absolute skill_dir returned by skill_view() — this is
|
||||
# correct for both local and external skills. Fall back to the old
|
||||
# SKILLS_DIR-relative reconstruction only when skill_dir is absent
|
||||
# (e.g. legacy skill_view responses).
|
||||
abs_skill_dir = loaded_skill.get("skill_dir")
|
||||
if abs_skill_dir:
|
||||
skill_dir = Path(abs_skill_dir)
|
||||
elif skill_path:
|
||||
if skill_path:
|
||||
try:
|
||||
skill_dir = SKILLS_DIR / Path(skill_path).parent
|
||||
except Exception:
|
||||
@@ -96,84 +76,20 @@ def _load_skill_payload(skill_identifier: str, task_id: str | None = None) -> tu
|
||||
return loaded_skill, skill_dir, skill_name
|
||||
|
||||
|
||||
def _inject_skill_config(loaded_skill: dict[str, Any], parts: list[str]) -> None:
|
||||
"""Resolve and inject skill-declared config values into the message parts.
|
||||
|
||||
If the loaded skill's frontmatter declares ``metadata.hermes.config``
|
||||
entries, their current values (from config.yaml or defaults) are appended
|
||||
as a ``[Skill config: ...]`` block so the agent knows the configured values
|
||||
without needing to read config.yaml itself.
|
||||
"""
|
||||
try:
|
||||
from agent.skill_utils import (
|
||||
extract_skill_config_vars,
|
||||
parse_frontmatter,
|
||||
resolve_skill_config_values,
|
||||
)
|
||||
|
||||
# The loaded_skill dict contains the raw content which includes frontmatter
|
||||
raw_content = str(loaded_skill.get("raw_content") or loaded_skill.get("content") or "")
|
||||
if not raw_content:
|
||||
return
|
||||
|
||||
frontmatter, _ = parse_frontmatter(raw_content)
|
||||
config_vars = extract_skill_config_vars(frontmatter)
|
||||
if not config_vars:
|
||||
return
|
||||
|
||||
resolved = resolve_skill_config_values(config_vars)
|
||||
if not resolved:
|
||||
return
|
||||
|
||||
lines = ["", f"[Skill config (from {display_hermes_home()}/config.yaml):"]
|
||||
for key, value in resolved.items():
|
||||
display_val = str(value) if value else "(not set)"
|
||||
lines.append(f" {key} = {display_val}")
|
||||
lines.append("]")
|
||||
parts.extend(lines)
|
||||
except Exception:
|
||||
pass # Non-critical — skill still loads without config injection
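# Illustrative appended block (the home path and values are examples):
#
# [Skill config (from ~/.hermes/config.yaml):
#   wiki.path = /home/user/wiki
#   wiki.max_results = (not set)
# ]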
|
||||
|
||||
|
||||
def _build_skill_message(
|
||||
loaded_skill: dict[str, Any],
|
||||
skill_dir: Path | None,
|
||||
activation_note: str,
|
||||
user_instruction: str = "",
|
||||
runtime_note: str = "",
|
||||
session_id: str | None = None,
|
||||
) -> str:
|
||||
"""Format a loaded skill into a user/system message payload."""
|
||||
from tools.skills_tool import SKILLS_DIR
|
||||
|
||||
content = str(loaded_skill.get("content") or "")
|
||||
|
||||
# ── Template substitution and inline-shell expansion ──
|
||||
# Done before anything else so downstream blocks (setup notes,
|
||||
# supporting-file hints) see the expanded content.
|
||||
skills_cfg = _load_skills_config()
|
||||
if skills_cfg.get("template_vars", True):
|
||||
content = _substitute_template_vars(content, skill_dir, session_id)
|
||||
if skills_cfg.get("inline_shell", False):
|
||||
timeout = int(skills_cfg.get("inline_shell_timeout", 10) or 10)
|
||||
content = _expand_inline_shell(content, skill_dir, timeout)
|
||||
|
||||
parts = [activation_note, "", content.strip()]
|
||||
|
||||
# ── Inject the absolute skill directory so the agent can reference
|
||||
# bundled scripts without an extra skill_view() round-trip. ──
|
||||
if skill_dir:
|
||||
parts.append("")
|
||||
parts.append(f"[Skill directory: {skill_dir}]")
|
||||
parts.append(
|
||||
"Resolve any relative paths in this skill (e.g. `scripts/foo.js`, "
|
||||
"`templates/config.yaml`) against that directory, then run them "
|
||||
"with the terminal tool using the absolute path."
|
||||
)
|
||||
|
||||
# ── Inject resolved skill config values ──
|
||||
_inject_skill_config(loaded_skill, parts)
|
||||
|
||||
if loaded_skill.get("setup_skipped"):
|
||||
parts.extend(
|
||||
[
|
||||
@@ -207,7 +123,7 @@ def _build_skill_message(
|
||||
subdir_path = skill_dir / subdir
|
||||
if subdir_path.exists():
|
||||
for f in sorted(subdir_path.rglob("*")):
|
||||
if f.is_file() and not f.is_symlink():
|
||||
if f.is_file():
|
||||
rel = str(f.relative_to(skill_dir))
|
||||
supporting.append(rel)
|
||||
|
||||
@@ -218,13 +134,11 @@ def _build_skill_message(
|
||||
# Skill is from an external dir — use the skill name instead
|
||||
skill_view_target = skill_dir.name
|
||||
parts.append("")
|
||||
parts.append("[This skill has supporting files:]")
|
||||
parts.append("[This skill has supporting files you can load with the skill_view tool:]")
|
||||
for sf in supporting:
|
||||
parts.append(f"- {sf} -> {skill_dir / sf}")
|
||||
parts.append(f"- {sf}")
|
||||
parts.append(
|
||||
f'\nLoad any of these with skill_view(name="{skill_view_target}", '
|
||||
f'file_path="<path>"), or run scripts directly by absolute path '
|
||||
f"(e.g. `node {skill_dir}/scripts/foo.js`)."
|
||||
f'\nTo view any of these, use: skill_view(name="{skill_view_target}", file_path="<path>")'
|
||||
)
|
||||
|
||||
if user_instruction:
|
||||
@@ -244,12 +158,11 @@ def scan_skill_commands() -> Dict[str, Dict[str, Any]]:
|
||||
Returns:
|
||||
Dict mapping "/skill-name" to {name, description, skill_md_path, skill_dir}.
|
||||
"""
|
||||
global _skill_commands, _skill_commands_platform
|
||||
_skill_commands_platform = _resolve_skill_commands_platform()
|
||||
global _skill_commands
|
||||
_skill_commands = {}
|
||||
try:
|
||||
from tools.skills_tool import SKILLS_DIR, _parse_frontmatter, skill_matches_platform, _get_disabled_skill_names
|
||||
from agent.skill_utils import get_external_skills_dirs, iter_skill_index_files
|
||||
from agent.skill_utils import get_external_skills_dirs
|
||||
disabled = _get_disabled_skill_names()
|
||||
seen_names: set = set()
|
||||
|
||||
@@ -260,8 +173,8 @@ def scan_skill_commands() -> Dict[str, Dict[str, Any]]:
|
||||
dirs_to_scan.extend(get_external_skills_dirs())
|
||||
|
||||
for scan_dir in dirs_to_scan:
|
||||
for skill_md in iter_skill_index_files(scan_dir, "SKILL.md"):
|
||||
if any(part in ('.git', '.github', '.hub', '.archive') for part in skill_md.parts):
|
||||
for skill_md in scan_dir.rglob("SKILL.md"):
|
||||
if any(part in ('.git', '.github', '.hub') for part in skill_md.parts):
|
||||
continue
|
||||
try:
|
||||
content = skill_md.read_text(encoding='utf-8')
|
||||
@@ -283,14 +196,7 @@ def scan_skill_commands() -> Dict[str, Dict[str, Any]]:
|
||||
description = line[:80]
|
||||
break
|
||||
seen_names.add(name)
|
||||
# Normalize to hyphen-separated slug, stripping
|
||||
# non-alnum chars (e.g. +, /) to avoid invalid
|
||||
# Telegram command names downstream.
|
||||
cmd_name = name.lower().replace(' ', '-').replace('_', '-')
|
||||
cmd_name = _SKILL_INVALID_CHARS.sub('', cmd_name)
|
||||
cmd_name = _SKILL_MULTI_HYPHEN.sub('-', cmd_name).strip('-')
|
||||
if not cmd_name:
|
||||
continue
|
||||
_skill_commands[f"/{cmd_name}"] = {
|
||||
"name": name,
|
||||
"description": description or f"Invoke the {name} skill",
|
||||
@@ -305,104 +211,12 @@ def scan_skill_commands() -> Dict[str, Dict[str, Any]]:
|
||||
|
||||
|
||||
def get_skill_commands() -> Dict[str, Dict[str, Any]]:
|
||||
"""Return the current skill commands mapping (scan first if empty).
|
||||
|
||||
Rescans when the active platform scope changes (e.g. a gateway
|
||||
process serving Telegram and Discord concurrently) so each platform
|
||||
sees its own ``skills.platform_disabled`` view (#14536).
|
||||
"""
|
||||
if (
|
||||
not _skill_commands
|
||||
or _skill_commands_platform != _resolve_skill_commands_platform()
|
||||
):
|
||||
"""Return the current skill commands mapping (scan first if empty)."""
|
||||
if not _skill_commands:
|
||||
scan_skill_commands()
|
||||
return _skill_commands
|
||||
|
||||
|
||||
def reload_skills() -> Dict[str, Any]:
|
||||
"""Re-scan the skills directory and return a diff of what changed.
|
||||
|
||||
Rescans ``~/.hermes/skills/`` and any ``skills.external_dirs`` so the
|
||||
slash-command map (``agent.skill_commands._skill_commands``) reflects
|
||||
skills added or removed on disk.
|
||||
|
||||
This does NOT invalidate the skills system-prompt cache. Skills are
|
||||
called by name via ``/skill-name``, ``skills_list``, or ``skill_view``
|
||||
— they don't need to be in the system prompt for the model to use them.
|
||||
Keeping the prompt cache intact preserves prefix caching across the
|
||||
reload, so a user invoking ``/reload-skills`` pays no cache-reset cost.
|
||||
|
||||
Returns:
|
||||
Dict with keys::
|
||||
|
||||
{
|
||||
"added": [{"name": str, "description": str}, ...],
|
||||
"removed": [{"name": str, "description": str}, ...],
|
||||
"unchanged": [skill names present before and after],
|
||||
"total": total skill count after rescan,
|
||||
"commands": total /slash-skill count after rescan,
|
||||
}
|
||||
|
||||
``description`` is the skill's full SKILL.md frontmatter
|
||||
``description:`` field — the same string the system prompt renders
|
||||
as `` - name: description`` for pre-existing skills.
|
||||
"""
|
||||
# Snapshot pre-reload state (name -> description) from the current
|
||||
# slash-command cache. Using dicts lets the post-rescan diff carry
|
||||
# descriptions for newly-visible or just-removed skills without a
|
||||
# second disk walk.
|
||||
def _snapshot(cmds: Dict[str, Dict[str, Any]]) -> Dict[str, str]:
|
||||
out: Dict[str, str] = {}
|
||||
for slash_key, info in cmds.items():
|
||||
bare = slash_key.lstrip("/")
|
||||
out[bare] = (info or {}).get("description") or ""
|
||||
return out
|
||||
|
||||
before = _snapshot(_skill_commands)
|
||||
|
||||
# Rescan the skills dir. ``scan_skill_commands`` resets
|
||||
# ``_skill_commands = {}`` internally and repopulates it.
|
||||
new_commands = scan_skill_commands()
|
||||
|
||||
after = _snapshot(new_commands)
|
||||
|
||||
added_names = sorted(set(after) - set(before))
|
||||
removed_names = sorted(set(before) - set(after))
|
||||
unchanged = sorted(set(after) & set(before))
|
||||
|
||||
added = [{"name": n, "description": after[n]} for n in added_names]
|
||||
# For removed skills, use the description we had cached pre-rescan
|
||||
# (the skill file is gone so we can't re-read it).
|
||||
removed = [{"name": n, "description": before[n]} for n in removed_names]
|
||||
|
||||
return {
|
||||
"added": added,
|
||||
"removed": removed,
|
||||
"unchanged": unchanged,
|
||||
"total": len(after),
|
||||
"commands": len(new_commands),
|
||||
}
|
||||
|
||||
|
||||
def resolve_skill_command_key(command: str) -> Optional[str]:
|
||||
"""Resolve a user-typed /command to its canonical skill_cmds key.
|
||||
|
||||
Skills are always stored with hyphens — ``scan_skill_commands`` normalizes
|
||||
spaces and underscores to hyphens when building the key. Hyphens and
|
||||
underscores are treated interchangeably in user input: this matches
|
||||
``_check_unavailable_skill`` and accommodates Telegram bot-command names
|
||||
(which disallow hyphens, so ``/claude-code`` is registered as
|
||||
``/claude_code`` and comes back in the underscored form).
|
||||
|
||||
Returns the matching ``/slug`` key from ``get_skill_commands()`` or
|
||||
``None`` if no match.
|
||||
"""
|
||||
if not command:
|
||||
return None
|
||||
cmd_key = f"/{command.replace('_', '-')}"
|
||||
return cmd_key if cmd_key in get_skill_commands() else None
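# Illustrative: Telegram registers "/claude-code" as "/claude_code", and the
# lookup normalizes underscores back to hyphens (skill names are examples).
resolve_skill_command_key("claude_code")    # -> "/claude-code" if that skill exists
resolve_skill_command_key("no-such-skill")  # -> None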
|
||||
|
||||
|
||||
def build_skill_invocation_message(
|
||||
cmd_key: str,
|
||||
user_instruction: str = "",
|
||||
@@ -428,16 +242,8 @@ def build_skill_invocation_message(
|
||||
return f"[Failed to load skill: {skill_info['name']}]"
|
||||
|
||||
loaded_skill, skill_dir, skill_name = loaded
|
||||
|
||||
# Track active usage for Curator lifecycle management (#17782)
|
||||
try:
|
||||
from tools.skill_usage import bump_use
|
||||
bump_use(skill_name)
|
||||
except Exception:
|
||||
pass # Non-critical — skill invocation proceeds regardless
|
||||
|
||||
activation_note = (
|
||||
f'[IMPORTANT: The user has invoked the "{skill_name}" skill, indicating they want '
|
||||
f'[SYSTEM: The user has invoked the "{skill_name}" skill, indicating they want '
|
||||
"you to follow its instructions. The full skill content is loaded below.]"
|
||||
)
|
||||
return _build_skill_message(
|
||||
@@ -446,7 +252,6 @@ def build_skill_invocation_message(
|
||||
activation_note,
|
||||
user_instruction=user_instruction,
|
||||
runtime_note=runtime_note,
|
||||
session_id=task_id,
|
||||
)
|
||||
|
||||
|
||||
@@ -475,16 +280,8 @@ def build_preloaded_skills_prompt(
|
||||
continue
|
||||
|
||||
loaded_skill, skill_dir, skill_name = loaded
|
||||
|
||||
# Track active usage for Curator lifecycle management (#17782)
|
||||
try:
|
||||
from tools.skill_usage import bump_use
|
||||
bump_use(skill_name)
|
||||
except Exception:
|
||||
pass # Non-critical
|
||||
|
||||
activation_note = (
|
||||
f'[IMPORTANT: The user launched this CLI session with the "{skill_name}" skill '
|
||||
f'[SYSTEM: The user launched this CLI session with the "{skill_name}" skill '
|
||||
"preloaded. Treat its instructions as active guidance for the duration of this "
|
||||
"session unless the user overrides them.]"
|
||||
)
|
||||
@@ -493,7 +290,6 @@ def build_preloaded_skills_prompt(
|
||||
loaded_skill,
|
||||
skill_dir,
|
||||
activation_note,
|
||||
session_id=task_id,
|
||||
)
|
||||
)
|
||||
loaded_names.append(skill_name)
|
||||
|
||||
@@ -1,131 +0,0 @@
|
||||
"""Shared SKILL.md preprocessing helpers."""
|
||||
|
||||
import logging
|
||||
import re
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Matches ${HERMES_SKILL_DIR} / ${HERMES_SESSION_ID} tokens in SKILL.md.
|
||||
# Tokens that don't resolve (e.g. ${HERMES_SESSION_ID} with no session) are
|
||||
# left as-is so the user can debug them.
|
||||
_SKILL_TEMPLATE_RE = re.compile(r"\$\{(HERMES_SKILL_DIR|HERMES_SESSION_ID)\}")
|
||||
|
||||
# Matches inline shell snippets like: !`date +%Y-%m-%d`
|
||||
# Non-greedy, single-line only -- no newlines inside the backticks.
|
||||
_INLINE_SHELL_RE = re.compile(r"!`([^`\n]+)`")
|
||||
|
||||
# Cap inline-shell output so a runaway command can't blow out the context.
|
||||
_INLINE_SHELL_MAX_OUTPUT = 4000
|
||||
|
||||
|
||||
def load_skills_config() -> dict:
|
||||
"""Load the ``skills`` section of config.yaml (best-effort)."""
|
||||
try:
|
||||
from hermes_cli.config import load_config
|
||||
|
||||
cfg = load_config() or {}
|
||||
skills_cfg = cfg.get("skills")
|
||||
if isinstance(skills_cfg, dict):
|
||||
return skills_cfg
|
||||
except Exception:
|
||||
logger.debug("Could not read skills config", exc_info=True)
|
||||
return {}
|
||||
|
||||
|
||||
def substitute_template_vars(
|
||||
content: str,
|
||||
skill_dir: Path | None,
|
||||
session_id: str | None,
|
||||
) -> str:
|
||||
"""Replace ${HERMES_SKILL_DIR} / ${HERMES_SESSION_ID} in skill content.
|
||||
|
||||
Only substitutes tokens for which a concrete value is available --
|
||||
unresolved tokens are left in place so the author can spot them.
|
||||
"""
|
||||
if not content:
|
||||
return content
|
||||
|
||||
skill_dir_str = str(skill_dir) if skill_dir else None
|
||||
|
||||
def _replace(match: re.Match) -> str:
|
||||
token = match.group(1)
|
||||
if token == "HERMES_SKILL_DIR" and skill_dir_str:
|
||||
return skill_dir_str
|
||||
if token == "HERMES_SESSION_ID" and session_id:
|
||||
return str(session_id)
|
||||
return match.group(0)
|
||||
|
||||
return _SKILL_TEMPLATE_RE.sub(_replace, content)
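# Minimal sketch: resolved tokens are substituted, unresolved ones stay put
# (the skill directory below is an example).
text = "Scripts live in ${HERMES_SKILL_DIR}/scripts for session ${HERMES_SESSION_ID}"
substitute_template_vars(text, Path("/skills/wiki"), None)
# -> "Scripts live in /skills/wiki/scripts for session ${HERMES_SESSION_ID}"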
|
||||
|
||||
|
||||
def run_inline_shell(command: str, cwd: Path | None, timeout: int) -> str:
|
||||
"""Execute a single inline-shell snippet and return its stdout (trimmed).
|
||||
|
||||
Failures return a short ``[inline-shell error: ...]`` marker instead of
|
||||
raising, so one bad snippet can't wreck the whole skill message.
|
||||
"""
|
||||
try:
|
||||
completed = subprocess.run(
|
||||
["bash", "-c", command],
|
||||
cwd=str(cwd) if cwd else None,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=max(1, int(timeout)),
|
||||
check=False,
|
||||
)
|
||||
except subprocess.TimeoutExpired:
|
||||
return f"[inline-shell timeout after {timeout}s: {command}]"
|
||||
except FileNotFoundError:
|
||||
return "[inline-shell error: bash not found]"
|
||||
except Exception as exc:
|
||||
return f"[inline-shell error: {exc}]"
|
||||
|
||||
output = (completed.stdout or "").rstrip("\n")
|
||||
if not output and completed.stderr:
|
||||
output = completed.stderr.rstrip("\n")
|
||||
if len(output) > _INLINE_SHELL_MAX_OUTPUT:
|
||||
output = output[:_INLINE_SHELL_MAX_OUTPUT] + "...[truncated]"
|
||||
return output
|
||||
|
||||
|
||||
def expand_inline_shell(
|
||||
content: str,
|
||||
skill_dir: Path | None,
|
||||
timeout: int,
|
||||
) -> str:
|
||||
"""Replace every !`cmd` snippet in ``content`` with its stdout.
|
||||
|
||||
Runs each snippet with the skill directory as CWD so relative paths in
|
||||
the snippet work the way the author expects.
|
||||
"""
|
||||
if "!`" not in content:
|
||||
return content
|
||||
|
||||
def _replace(match: re.Match) -> str:
|
||||
cmd = match.group(1).strip()
|
||||
if not cmd:
|
||||
return ""
|
||||
return run_inline_shell(cmd, skill_dir, timeout)
|
||||
|
||||
return _INLINE_SHELL_RE.sub(_replace, content)
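# Illustrative: each !`cmd` snippet is replaced by its stdout, run with the
# skill directory as CWD (output shown is an example and machine-dependent).
expand_inline_shell("Today is !`date +%Y-%m-%d`.", Path("/skills/daily"), 5)
# -> "Today is 2025-01-15."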
|
||||
|
||||
|
||||
def preprocess_skill_content(
|
||||
content: str,
|
||||
skill_dir: Path | None,
|
||||
session_id: str | None = None,
|
||||
skills_cfg: dict | None = None,
|
||||
) -> str:
|
||||
"""Apply configured SKILL.md template and inline-shell preprocessing."""
|
||||
if not content:
|
||||
return content
|
||||
|
||||
cfg = skills_cfg if isinstance(skills_cfg, dict) else load_skills_config()
|
||||
if cfg.get("template_vars", True):
|
||||
content = substitute_template_vars(content, skill_dir, session_id)
|
||||
if cfg.get("inline_shell", False):
|
||||
timeout = int(cfg.get("inline_shell_timeout", 10) or 10)
|
||||
content = expand_inline_shell(content, skill_dir, timeout)
|
||||
return content
|
||||
@@ -12,7 +12,7 @@ import sys
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Set, Tuple
|
||||
|
||||
from hermes_constants import get_config_path, get_skills_dir
|
||||
from hermes_constants import get_hermes_home
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -24,7 +24,7 @@ PLATFORM_MAP = {
|
||||
"windows": "win32",
|
||||
}
|
||||
|
||||
EXCLUDED_SKILL_DIRS = frozenset((".git", ".github", ".hub", ".archive"))
|
||||
EXCLUDED_SKILL_DIRS = frozenset((".git", ".github", ".hub"))
|
||||
|
||||
# ── Lazy YAML loader ─────────────────────────────────────────────────────
|
||||
|
||||
@@ -118,19 +118,14 @@ def skill_matches_platform(frontmatter: Dict[str, Any]) -> bool:
|
||||
# ── Disabled skills ───────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def get_disabled_skill_names(platform: str | None = None) -> Set[str]:
|
||||
def get_disabled_skill_names() -> Set[str]:
|
||||
"""Read disabled skill names from config.yaml.
|
||||
|
||||
Args:
|
||||
platform: Explicit platform name (e.g. ``"telegram"``). When
|
||||
*None*, resolves from ``HERMES_PLATFORM`` or
|
||||
``HERMES_SESSION_PLATFORM`` env vars. Falls back to the
|
||||
global disabled list when no platform is determined.
|
||||
|
||||
Reads the config file directly (no CLI config imports) to stay
|
||||
lightweight.
|
||||
Resolves platform from ``HERMES_PLATFORM`` env var, falls back to
|
||||
the global disabled list. Reads the config file directly (no CLI
|
||||
config imports) to stay lightweight.
|
||||
"""
|
||||
config_path = get_config_path()
|
||||
config_path = get_hermes_home() / "config.yaml"
|
||||
if not config_path.exists():
|
||||
return set()
|
||||
try:
|
||||
@@ -145,12 +140,7 @@ def get_disabled_skill_names(platform: str | None = None) -> Set[str]:
|
||||
if not isinstance(skills_cfg, dict):
|
||||
return set()
|
||||
|
||||
from gateway.session_context import get_session_env
|
||||
resolved_platform = (
|
||||
platform
|
||||
or os.getenv("HERMES_PLATFORM")
|
||||
or get_session_env("HERMES_SESSION_PLATFORM")
|
||||
)
|
||||
resolved_platform = os.getenv("HERMES_PLATFORM")
|
||||
if resolved_platform:
|
||||
platform_disabled = (skills_cfg.get("platform_disabled") or {}).get(
|
||||
resolved_platform
|
||||
@@ -178,7 +168,7 @@ def get_external_skills_dirs() -> List[Path]:
|
||||
path. Only directories that actually exist are returned. Duplicates and
|
||||
paths that resolve to the local ``~/.hermes/skills/`` are silently skipped.
|
||||
"""
|
||||
config_path = get_config_path()
|
||||
config_path = get_hermes_home() / "config.yaml"
|
||||
if not config_path.exists():
|
||||
return []
|
||||
try:
|
||||
@@ -200,10 +190,7 @@ def get_external_skills_dirs() -> List[Path]:
|
||||
if not isinstance(raw_dirs, list):
|
||||
return []
|
||||
|
||||
from hermes_constants import get_hermes_home
|
||||
|
||||
hermes_home = get_hermes_home()
|
||||
local_skills = get_skills_dir().resolve()
|
||||
local_skills = (get_hermes_home() / "skills").resolve()
|
||||
seen: Set[Path] = set()
|
||||
result: List[Path] = []
|
||||
|
||||
@@ -213,12 +200,7 @@ def get_external_skills_dirs() -> List[Path]:
|
||||
continue
|
||||
# Expand ~ and environment variables
|
||||
expanded = os.path.expanduser(os.path.expandvars(entry))
|
||||
p = Path(expanded)
|
||||
# Resolve relative paths against HERMES_HOME, not cwd
|
||||
if not p.is_absolute():
|
||||
p = (hermes_home / p).resolve()
|
||||
else:
|
||||
p = p.resolve()
|
||||
p = Path(expanded).resolve()
|
||||
if p == local_skills:
|
||||
continue
|
||||
if p in seen:
|
||||
@@ -238,7 +220,7 @@ def get_all_skills_dirs() -> List[Path]:
|
||||
The local dir is always first (and always included even if it doesn't exist
|
||||
yet — callers handle that). External dirs follow in config order.
|
||||
"""
|
||||
dirs = [get_skills_dir()]
|
||||
dirs = [get_hermes_home() / "skills"]
|
||||
dirs.extend(get_external_skills_dirs())
|
||||
return dirs
|
||||
|
||||
@@ -263,163 +245,6 @@ def extract_skill_conditions(frontmatter: Dict[str, Any]) -> Dict[str, List]:
|
||||
}
|
||||
|
||||
|
||||
# ── Skill config extraction ───────────────────────────────────────────────
|
||||
|
||||
|
||||
def extract_skill_config_vars(frontmatter: Dict[str, Any]) -> List[Dict[str, Any]]:
|
||||
"""Extract config variable declarations from parsed frontmatter.
|
||||
|
||||
Skills declare config.yaml settings they need via::
|
||||
|
||||
metadata:
|
||||
hermes:
|
||||
config:
|
||||
- key: wiki.path
|
||||
description: Path to the LLM Wiki knowledge base directory
|
||||
default: "~/wiki"
|
||||
prompt: Wiki directory path
|
||||
|
||||
Returns a list of dicts with keys: ``key``, ``description``, ``default``,
|
||||
``prompt``. Invalid or incomplete entries are silently skipped.
|
||||
"""
|
||||
metadata = frontmatter.get("metadata")
|
||||
if not isinstance(metadata, dict):
|
||||
return []
|
||||
hermes = metadata.get("hermes")
|
||||
if not isinstance(hermes, dict):
|
||||
return []
|
||||
raw = hermes.get("config")
|
||||
if not raw:
|
||||
return []
|
||||
if isinstance(raw, dict):
|
||||
raw = [raw]
|
||||
if not isinstance(raw, list):
|
||||
return []
|
||||
|
||||
result: List[Dict[str, Any]] = []
|
||||
seen: set = set()
|
||||
for item in raw:
|
||||
if not isinstance(item, dict):
|
||||
continue
|
||||
key = str(item.get("key", "")).strip()
|
||||
if not key or key in seen:
|
||||
continue
|
||||
# Must have at least key and description
|
||||
desc = str(item.get("description", "")).strip()
|
||||
if not desc:
|
||||
continue
|
||||
entry: Dict[str, Any] = {
|
||||
"key": key,
|
||||
"description": desc,
|
||||
}
|
||||
default = item.get("default")
|
||||
if default is not None:
|
||||
entry["default"] = default
|
||||
prompt_text = item.get("prompt")
|
||||
if isinstance(prompt_text, str) and prompt_text.strip():
|
||||
entry["prompt"] = prompt_text.strip()
|
||||
else:
|
||||
entry["prompt"] = desc
|
||||
seen.add(key)
|
||||
result.append(entry)
|
||||
return result
|
||||
|
||||
|
||||
def discover_all_skill_config_vars() -> List[Dict[str, Any]]:
|
||||
"""Scan all enabled skills and collect their config variable declarations.
|
||||
|
||||
Walks every skills directory, parses each SKILL.md frontmatter, and returns
|
||||
a deduplicated list of config var dicts. Each dict also includes a
|
||||
``skill`` key with the skill name for attribution.
|
||||
|
||||
Disabled and platform-incompatible skills are excluded.
|
||||
"""
|
||||
all_vars: List[Dict[str, Any]] = []
|
||||
seen_keys: set = set()
|
||||
|
||||
disabled = get_disabled_skill_names()
|
||||
for skills_dir in get_all_skills_dirs():
|
||||
if not skills_dir.is_dir():
|
||||
continue
|
||||
for skill_file in iter_skill_index_files(skills_dir, "SKILL.md"):
|
||||
try:
|
||||
raw = skill_file.read_text(encoding="utf-8")
|
||||
frontmatter, _ = parse_frontmatter(raw)
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
skill_name = frontmatter.get("name") or skill_file.parent.name
|
||||
if str(skill_name) in disabled:
|
||||
continue
|
||||
if not skill_matches_platform(frontmatter):
|
||||
continue
|
||||
|
||||
config_vars = extract_skill_config_vars(frontmatter)
|
||||
for var in config_vars:
|
||||
if var["key"] not in seen_keys:
|
||||
var["skill"] = str(skill_name)
|
||||
all_vars.append(var)
|
||||
seen_keys.add(var["key"])
|
||||
|
||||
return all_vars
|
||||
|
||||
|
||||
# Storage prefix: all skill config vars are stored under skills.config.*
|
||||
# in config.yaml. Skill authors declare logical keys (e.g. "wiki.path");
|
||||
# the system adds this prefix for storage and strips it for display.
|
||||
SKILL_CONFIG_PREFIX = "skills.config"
|
||||
|
||||
|
||||
def _resolve_dotpath(config: Dict[str, Any], dotted_key: str):
|
||||
"""Walk a nested dict following a dotted key. Returns None if any part is missing."""
|
||||
parts = dotted_key.split(".")
|
||||
current = config
|
||||
for part in parts:
|
||||
if isinstance(current, dict) and part in current:
|
||||
current = current[part]
|
||||
else:
|
||||
return None
|
||||
return current
|
||||
|
||||
|
||||
def resolve_skill_config_values(
|
||||
config_vars: List[Dict[str, Any]],
|
||||
) -> Dict[str, Any]:
|
||||
"""Resolve current values for skill config vars from config.yaml.
|
||||
|
||||
Skill config is stored under ``skills.config.<key>`` in config.yaml.
|
||||
Returns a dict mapping **logical** keys (as declared by skills) to their
|
||||
current values (or the declared default if the key isn't set).
|
||||
Path values are expanded via ``os.path.expanduser``.
|
||||
"""
|
||||
config_path = get_config_path()
|
||||
config: Dict[str, Any] = {}
|
||||
if config_path.exists():
|
||||
try:
|
||||
parsed = yaml_load(config_path.read_text(encoding="utf-8"))
|
||||
if isinstance(parsed, dict):
|
||||
config = parsed
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
resolved: Dict[str, Any] = {}
|
||||
for var in config_vars:
|
||||
logical_key = var["key"]
|
||||
storage_key = f"{SKILL_CONFIG_PREFIX}.{logical_key}"
|
||||
value = _resolve_dotpath(config, storage_key)
|
||||
|
||||
if value is None or (isinstance(value, str) and not value.strip()):
|
||||
value = var.get("default", "")
|
||||
|
||||
# Expand ~ in path-like values
|
||||
if isinstance(value, str) and ("~" in value or "${" in value):
|
||||
value = os.path.expanduser(os.path.expandvars(value))
|
||||
|
||||
resolved[logical_key] = value
|
||||
|
||||
return resolved
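# Illustrative: an unset key falls back to its declared default, and ~ in
# path-like values is expanded (the home directory shown is an example).
declared = [{"key": "wiki.path", "description": "Wiki dir", "default": "~/wiki"}]
resolve_skill_config_values(declared)  # -> {"wiki.path": "/home/user/wiki"} when unset in config.yaml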
|
||||
|
||||
|
||||
# ── Description extraction ────────────────────────────────────────────────
|
||||
|
||||
|
||||
@@ -440,34 +265,12 @@ def extract_skill_description(frontmatter: Dict[str, Any]) -> str:
|
||||
def iter_skill_index_files(skills_dir: Path, filename: str):
|
||||
"""Walk skills_dir yielding sorted paths matching *filename*.
|
||||
|
||||
Excludes ``.git``, ``.github``, ``.hub``, ``.archive`` directories.
|
||||
Excludes ``.git``, ``.github``, ``.hub`` directories.
|
||||
"""
|
||||
matches = []
|
||||
for root, dirs, files in os.walk(skills_dir, followlinks=True):
|
||||
for root, dirs, files in os.walk(skills_dir):
|
||||
dirs[:] = [d for d in dirs if d not in EXCLUDED_SKILL_DIRS]
|
||||
if filename in files:
|
||||
matches.append(Path(root) / filename)
|
||||
for path in sorted(matches, key=lambda p: str(p.relative_to(skills_dir))):
|
||||
yield path
|
||||
|
||||
|
||||
# ── Namespace helpers for plugin-provided skills ───────────────────────────
|
||||
|
||||
_NAMESPACE_RE = re.compile(r"^[a-zA-Z0-9_-]+$")
|
||||
|
||||
|
||||
def parse_qualified_name(name: str) -> Tuple[Optional[str], str]:
|
||||
"""Split ``'namespace:skill-name'`` into ``(namespace, bare_name)``.
|
||||
|
||||
Returns ``(None, name)`` when there is no ``':'``.
|
||||
"""
|
||||
if ":" not in name:
|
||||
return None, name
|
||||
return tuple(name.split(":", 1)) # type: ignore[return-value]
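# Illustrative (namespace and skill names are examples):
parse_qualified_name("myplugin:deploy")  # -> ("myplugin", "deploy")
parse_qualified_name("deploy")           # -> (None, "deploy")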
|
||||
|
||||
|
||||
def is_valid_namespace(candidate: Optional[str]) -> bool:
|
||||
"""Check whether *candidate* is a valid namespace (``[a-zA-Z0-9_-]+``)."""
|
||||
if not candidate:
|
||||
return False
|
||||
return bool(_NAMESPACE_RE.match(candidate))
|
||||
|
||||
194
agent/smart_model_routing.py
Normal file
@@ -0,0 +1,194 @@
|
||||
"""Helpers for optional cheap-vs-strong model routing."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import re
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
from utils import is_truthy_value
|
||||
|
||||
_COMPLEX_KEYWORDS = {
|
||||
"debug",
|
||||
"debugging",
|
||||
"implement",
|
||||
"implementation",
|
||||
"refactor",
|
||||
"patch",
|
||||
"traceback",
|
||||
"stacktrace",
|
||||
"exception",
|
||||
"error",
|
||||
"analyze",
|
||||
"analysis",
|
||||
"investigate",
|
||||
"architecture",
|
||||
"design",
|
||||
"compare",
|
||||
"benchmark",
|
||||
"optimize",
|
||||
"optimise",
|
||||
"review",
|
||||
"terminal",
|
||||
"shell",
|
||||
"tool",
|
||||
"tools",
|
||||
"pytest",
|
||||
"test",
|
||||
"tests",
|
||||
"plan",
|
||||
"planning",
|
||||
"delegate",
|
||||
"subagent",
|
||||
"cron",
|
||||
"docker",
|
||||
"kubernetes",
|
||||
}
|
||||
|
||||
_URL_RE = re.compile(r"https?://|www\.", re.IGNORECASE)
|
||||
|
||||
|
||||
def _coerce_bool(value: Any, default: bool = False) -> bool:
|
||||
return is_truthy_value(value, default=default)
|
||||
|
||||
|
||||
def _coerce_int(value: Any, default: int) -> int:
|
||||
try:
|
||||
return int(value)
|
||||
except (TypeError, ValueError):
|
||||
return default
|
||||
|
||||
|
||||
def choose_cheap_model_route(user_message: str, routing_config: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
|
||||
"""Return the configured cheap-model route when a message looks simple.
|
||||
|
||||
Conservative by design: if the message has signs of code/tool/debugging/
|
||||
long-form work, keep the primary model.
|
||||
"""
|
||||
cfg = routing_config or {}
|
||||
if not _coerce_bool(cfg.get("enabled"), False):
|
||||
return None
|
||||
|
||||
cheap_model = cfg.get("cheap_model") or {}
|
||||
if not isinstance(cheap_model, dict):
|
||||
return None
|
||||
provider = str(cheap_model.get("provider") or "").strip().lower()
|
||||
model = str(cheap_model.get("model") or "").strip()
|
||||
if not provider or not model:
|
||||
return None
|
||||
|
||||
text = (user_message or "").strip()
|
||||
if not text:
|
||||
return None
|
||||
|
||||
max_chars = _coerce_int(cfg.get("max_simple_chars"), 160)
|
||||
max_words = _coerce_int(cfg.get("max_simple_words"), 28)
|
||||
|
||||
if len(text) > max_chars:
|
||||
return None
|
||||
if len(text.split()) > max_words:
|
||||
return None
|
||||
if text.count("\n") > 1:
|
||||
return None
|
||||
if "```" in text or "`" in text:
|
||||
return None
|
||||
if _URL_RE.search(text):
|
||||
return None
|
||||
|
||||
lowered = text.lower()
|
||||
words = {token.strip(".,:;!?()[]{}\"'`") for token in lowered.split()}
|
||||
if words & _COMPLEX_KEYWORDS:
|
||||
return None
|
||||
|
||||
route = dict(cheap_model)
|
||||
route["provider"] = provider
|
||||
route["model"] = model
|
||||
route["routing_reason"] = "simple_turn"
|
||||
return route
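# Illustrative routing config; the provider/model values are examples, not defaults.
routing_cfg = {
    "enabled": True,
    "cheap_model": {"provider": "openrouter", "model": "qwen/qwen3-8b"},
    "max_simple_chars": 160,
    "max_simple_words": 28,
}
choose_cheap_model_route("what is the capital of France?", routing_cfg)
# -> {"provider": "openrouter", "model": "qwen/qwen3-8b", "routing_reason": "simple_turn"}
choose_cheap_model_route("debug this traceback for me", routing_cfg)  # -> None (complex keywords)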
|
||||
|
||||
|
||||
def resolve_turn_route(user_message: str, routing_config: Optional[Dict[str, Any]], primary: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Resolve the effective model/runtime for one turn.
|
||||
|
||||
Returns a dict with model/runtime/signature/label fields.
|
||||
"""
|
||||
route = choose_cheap_model_route(user_message, routing_config)
|
||||
if not route:
|
||||
return {
|
||||
"model": primary.get("model"),
|
||||
"runtime": {
|
||||
"api_key": primary.get("api_key"),
|
||||
"base_url": primary.get("base_url"),
|
||||
"provider": primary.get("provider"),
|
||||
"api_mode": primary.get("api_mode"),
|
||||
"command": primary.get("command"),
|
||||
"args": list(primary.get("args") or []),
|
||||
"credential_pool": primary.get("credential_pool"),
|
||||
},
|
||||
"label": None,
|
||||
"signature": (
|
||||
primary.get("model"),
|
||||
primary.get("provider"),
|
||||
primary.get("base_url"),
|
||||
primary.get("api_mode"),
|
||||
primary.get("command"),
|
||||
tuple(primary.get("args") or ()),
|
||||
),
|
||||
}
|
||||
|
||||
from hermes_cli.runtime_provider import resolve_runtime_provider
|
||||
|
||||
explicit_api_key = None
|
||||
api_key_env = str(route.get("api_key_env") or "").strip()
|
||||
if api_key_env:
|
||||
explicit_api_key = os.getenv(api_key_env) or None
|
||||
|
||||
try:
|
||||
runtime = resolve_runtime_provider(
|
||||
requested=route.get("provider"),
|
||||
explicit_api_key=explicit_api_key,
|
||||
explicit_base_url=route.get("base_url"),
|
||||
)
|
||||
except Exception:
|
||||
return {
|
||||
"model": primary.get("model"),
|
||||
"runtime": {
|
||||
"api_key": primary.get("api_key"),
|
||||
"base_url": primary.get("base_url"),
|
||||
"provider": primary.get("provider"),
|
||||
"api_mode": primary.get("api_mode"),
|
||||
"command": primary.get("command"),
|
||||
"args": list(primary.get("args") or []),
|
||||
"credential_pool": primary.get("credential_pool"),
|
||||
},
|
||||
"label": None,
|
||||
"signature": (
|
||||
primary.get("model"),
|
||||
primary.get("provider"),
|
||||
primary.get("base_url"),
|
||||
primary.get("api_mode"),
|
||||
primary.get("command"),
|
||||
tuple(primary.get("args") or ()),
|
||||
),
|
||||
}
|
||||
|
||||
return {
|
||||
"model": route.get("model"),
|
||||
"runtime": {
|
||||
"api_key": runtime.get("api_key"),
|
||||
"base_url": runtime.get("base_url"),
|
||||
"provider": runtime.get("provider"),
|
||||
"api_mode": runtime.get("api_mode"),
|
||||
"command": runtime.get("command"),
|
||||
"args": list(runtime.get("args") or []),
|
||||
},
|
||||
"label": f"smart route → {route.get('model')} ({runtime.get('provider')})",
|
||||
"signature": (
|
||||
route.get("model"),
|
||||
runtime.get("provider"),
|
||||
runtime.get("base_url"),
|
||||
runtime.get("api_mode"),
|
||||
runtime.get("command"),
|
||||
tuple(runtime.get("args") or ()),
|
||||
),
|
||||
}
|
||||
@@ -1,224 +0,0 @@
|
||||
"""Progressive subdirectory hint discovery.
|
||||
|
||||
As the agent navigates into subdirectories via tool calls (read_file, terminal,
|
||||
search_files, etc.), this module discovers and loads project context files
|
||||
(AGENTS.md, CLAUDE.md, .cursorrules) from those directories. Discovered hints
|
||||
are appended to the tool result so the model gets relevant context at the moment
|
||||
it starts working in a new area of the codebase.
|
||||
|
||||
This complements the startup context loading in ``prompt_builder.py`` which only
|
||||
loads from the CWD. Subdirectory hints are discovered lazily and injected into
|
||||
the conversation without modifying the system prompt (preserving prompt caching).
|
||||
|
||||
Inspired by Block/goose's SubdirectoryHintTracker.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import shlex
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, Optional, Set
|
||||
|
||||
from agent.prompt_builder import _scan_context_content
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Context files to look for in subdirectories, in priority order.
|
||||
# Same filenames as prompt_builder.py but we load ALL found (not first-wins)
|
||||
# since different subdirectories may use different conventions.
|
||||
_HINT_FILENAMES = [
|
||||
"AGENTS.md", "agents.md",
|
||||
"CLAUDE.md", "claude.md",
|
||||
".cursorrules",
|
||||
]
|
||||
|
||||
# Maximum chars per hint file to prevent context bloat
|
||||
_MAX_HINT_CHARS = 8_000
|
||||
|
||||
# Tool argument keys that typically contain file paths
|
||||
_PATH_ARG_KEYS = {"path", "file_path", "workdir"}
|
||||
|
||||
# Tools that take shell commands where we should extract paths
|
||||
_COMMAND_TOOLS = {"terminal"}
|
||||
|
||||
# How many parent directories to walk up when looking for hints.
|
||||
# Prevents scanning all the way to / for deeply nested paths.
|
||||
_MAX_ANCESTOR_WALK = 5
|
||||
|
||||
class SubdirectoryHintTracker:
|
||||
"""Track which directories the agent visits and load hints on first access.
|
||||
|
||||
Usage::
|
||||
|
||||
tracker = SubdirectoryHintTracker(working_dir="/path/to/project")
|
||||
|
||||
# After each tool call:
|
||||
hints = tracker.check_tool_call("read_file", {"path": "backend/src/main.py"})
|
||||
if hints:
|
||||
tool_result += hints # append to the tool result string
|
||||
"""
|
||||
|
||||
def __init__(self, working_dir: Optional[str] = None):
|
||||
self.working_dir = Path(working_dir or os.getcwd()).resolve()
|
||||
self._loaded_dirs: Set[Path] = set()
|
||||
# Pre-mark the working dir as loaded (startup context handles it)
|
||||
self._loaded_dirs.add(self.working_dir)
|
||||
|
||||
def check_tool_call(
|
||||
self,
|
||||
tool_name: str,
|
||||
tool_args: Dict[str, Any],
|
||||
) -> Optional[str]:
|
||||
"""Check tool call arguments for new directories and load any hint files.
|
||||
|
||||
Returns formatted hint text to append to the tool result, or None.
|
||||
"""
|
||||
dirs = self._extract_directories(tool_name, tool_args)
|
||||
if not dirs:
|
||||
return None
|
||||
|
||||
all_hints = []
|
||||
for d in dirs:
|
||||
hints = self._load_hints_for_directory(d)
|
||||
if hints:
|
||||
all_hints.append(hints)
|
||||
|
||||
if not all_hints:
|
||||
return None
|
||||
|
||||
return "\n\n" + "\n\n".join(all_hints)
|
||||
|
||||
def _extract_directories(
|
||||
self, tool_name: str, args: Dict[str, Any]
|
||||
) -> list:
|
||||
"""Extract directory paths from tool call arguments."""
|
||||
candidates: Set[Path] = set()
|
||||
|
||||
# Direct path arguments
|
||||
for key in _PATH_ARG_KEYS:
|
||||
val = args.get(key)
|
||||
if isinstance(val, str) and val.strip():
|
||||
self._add_path_candidate(val, candidates)
|
||||
|
||||
# Shell commands — extract path-like tokens
|
||||
if tool_name in _COMMAND_TOOLS:
|
||||
cmd = args.get("command", "")
|
||||
if isinstance(cmd, str):
|
||||
self._extract_paths_from_command(cmd, candidates)
|
||||
|
||||
return list(candidates)
|
||||
|
||||
def _add_path_candidate(self, raw_path: str, candidates: Set[Path]):
|
||||
"""Resolve a raw path and add its directory + ancestors to candidates.
|
||||
|
||||
Walks up from the resolved directory toward the filesystem root,
|
||||
stopping at the first directory already in ``_loaded_dirs`` (or after
|
||||
``_MAX_ANCESTOR_WALK`` levels). This ensures that reading
|
||||
``project/src/main.py`` discovers ``project/AGENTS.md`` even when
|
||||
``project/src/`` has no hint files of its own.
|
||||
"""
|
||||
try:
|
||||
p = Path(raw_path).expanduser()
|
||||
if not p.is_absolute():
|
||||
p = self.working_dir / p
|
||||
p = p.resolve()
|
||||
# Use parent if it's a file path (has extension or doesn't exist as dir)
|
||||
if p.suffix or (p.exists() and p.is_file()):
|
||||
p = p.parent
|
||||
# Walk up ancestors — stop at already-loaded or root
|
||||
for _ in range(_MAX_ANCESTOR_WALK):
|
||||
if p in self._loaded_dirs:
|
||||
break
|
||||
if self._is_valid_subdir(p):
|
||||
candidates.add(p)
|
||||
parent = p.parent
|
||||
if parent == p:
|
||||
break # filesystem root
|
||||
p = parent
|
||||
except (OSError, ValueError):
|
||||
pass
|
||||
|
||||
def _extract_paths_from_command(self, cmd: str, candidates: Set[Path]):
|
||||
"""Extract path-like tokens from a shell command string."""
|
||||
try:
|
||||
tokens = shlex.split(cmd)
|
||||
except ValueError:
|
||||
tokens = cmd.split()
|
||||
|
||||
for token in tokens:
|
||||
# Skip flags
|
||||
if token.startswith("-"):
|
||||
continue
|
||||
# Must look like a path (contains / or .)
|
||||
if "/" not in token and "." not in token:
|
||||
continue
|
||||
# Skip URLs
|
||||
if token.startswith(("http://", "https://", "git@")):
|
||||
continue
|
||||
self._add_path_candidate(token, candidates)
|
||||
|
||||
def _is_valid_subdir(self, path: Path) -> bool:
|
||||
"""Check if path is a valid directory to scan for hints."""
|
||||
try:
|
||||
if not path.is_dir():
|
||||
return False
|
||||
except OSError:
|
||||
return False
|
||||
if path in self._loaded_dirs:
|
||||
return False
|
||||
return True
|
||||
|
||||
def _load_hints_for_directory(self, directory: Path) -> Optional[str]:
|
||||
"""Load hint files from a directory. Returns formatted text or None."""
|
||||
self._loaded_dirs.add(directory)
|
||||
|
||||
found_hints = []
|
||||
for filename in _HINT_FILENAMES:
|
||||
hint_path = directory / filename
|
||||
try:
|
||||
if not hint_path.is_file():
|
||||
continue
|
||||
except OSError:
|
||||
continue
|
||||
try:
|
||||
content = hint_path.read_text(encoding="utf-8").strip()
|
||||
if not content:
|
||||
continue
|
||||
# Same security scan as startup context loading
|
||||
content = _scan_context_content(content, filename)
|
||||
if len(content) > _MAX_HINT_CHARS:
|
||||
content = (
|
||||
content[:_MAX_HINT_CHARS]
|
||||
+ f"\n\n[...truncated {filename}: {len(content):,} chars total]"
|
||||
)
|
||||
# Best-effort relative path for display
|
||||
rel_path = str(hint_path)
|
||||
try:
|
||||
rel_path = str(hint_path.relative_to(self.working_dir))
|
||||
except ValueError:
|
||||
try:
|
||||
rel_path = str(hint_path.relative_to(Path.home()))
|
||||
rel_path = "~/" + rel_path
|
||||
except ValueError:
|
||||
pass # keep absolute
|
||||
found_hints.append((rel_path, content))
|
||||
# First match wins per directory (like startup loading)
|
||||
break
|
||||
except Exception as exc:
|
||||
logger.debug("Could not read %s: %s", hint_path, exc)
|
||||
|
||||
if not found_hints:
|
||||
return None
|
||||
|
||||
sections = []
|
||||
for rel_path, content in found_hints:
|
||||
sections.append(
|
||||
f"[Subdirectory context discovered: {rel_path}]\n{content}"
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
"Loaded subdirectory hints from %s: %s",
|
||||
directory,
|
||||
[h[0] for h in found_hints],
|
||||
)
|
||||
return "\n\n".join(sections)
|
||||
@@ -1,386 +0,0 @@
|
||||
"""Stateful scrubber for reasoning/thinking blocks in streamed assistant text.
|
||||
|
||||
``run_agent._strip_think_blocks`` is regex-based and correct for a complete
|
||||
string, but when it runs *per-delta* in ``_fire_stream_delta`` it destroys
|
||||
the state that downstream consumers (CLI ``_stream_delta``, gateway
|
||||
``GatewayStreamConsumer._filter_and_accumulate``) rely on.
|
||||
|
||||
Concretely, when MiniMax-M2.7 streams
|
||||
|
||||
delta1 = "<think>"
|
||||
delta2 = "Let me check their config"
|
||||
delta3 = "</think>"
|
||||
|
||||
the per-delta regex erases delta1 entirely (case 2: unterminated-open at
|
||||
boundary matches ``^<think>...``), so the downstream state machine never
|
||||
sees the open tag, treats delta2 as regular content, and leaks reasoning
|
||||
to the user. Consumers that don't run their own state machine (ACP,
|
||||
api_server, TTS) never had any defence at all — they just emitted
|
||||
whatever survived the upstream regex.
|
||||
|
||||
This module centralises the tag-suppression state machine at the
|
||||
upstream layer so every stream_delta_callback sees text that has
|
||||
already had reasoning blocks removed. Partial tags at delta
|
||||
boundaries are held back until the next delta resolves them, and
|
||||
end-of-stream flushing surfaces any held-back prose that turned out
|
||||
not to be a real tag.
|
||||
|
||||
Usage::
|
||||
|
||||
scrubber = StreamingThinkScrubber()
|
||||
for delta in stream:
|
||||
visible = scrubber.feed(delta)
|
||||
if visible:
|
||||
emit(visible)
|
||||
tail = scrubber.flush() # at end of stream
|
||||
if tail:
|
||||
emit(tail)
|
||||
|
||||
The scrubber is re-entrant per agent instance. Call ``reset()`` at
|
||||
the top of each new turn so a hung block from an interrupted prior
|
||||
stream cannot taint the next turn's output.
|
||||
|
||||
Tag variants handled (case-insensitive):
|
||||
``<think>``, ``<thinking>``, ``<reasoning>``, ``<thought>``,
|
||||
``<REASONING_SCRATCHPAD>``.
|
||||
|
||||
Block-boundary rule for opens: an opening tag is only treated as a
|
||||
reasoning-block opener when it appears at the start of the stream,
|
||||
after a newline (optionally followed by whitespace), or when only
|
||||
whitespace has been emitted on the current line. This prevents prose
|
||||
that *mentions* the tag name (e.g. ``"use <think> tags here"``) from
|
||||
being incorrectly suppressed. Closed pairs (``<think>X</think>``) are
|
||||
always suppressed regardless of boundary; a closed pair is an
|
||||
intentional, bounded construct.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Tuple
|
||||
|
||||
__all__ = ["StreamingThinkScrubber"]
|
||||
|
||||
|
||||
class StreamingThinkScrubber:
|
||||
"""Stateful scrubber for streaming reasoning/thinking blocks.
|
||||
|
||||
State machine:
|
||||
- ``_in_block``: True while inside an opened block, waiting for
|
||||
a close tag. All text inside is discarded.
|
||||
- ``_buf``: held-back partial-tag tail. Emitted / discarded on
|
||||
the next ``feed()`` call or by ``flush()``.
|
||||
- ``_last_emitted_ended_newline``: True iff the most recent
|
||||
emission to the consumer ended with ``\\n``, or nothing has
|
||||
been emitted yet (start-of-stream counts as a boundary). Used
|
||||
to decide whether an open tag at buffer position 0 is at a
|
||||
block boundary.
|
||||
"""
|
||||
|
||||
_OPEN_TAG_NAMES: Tuple[str, ...] = (
|
||||
"think",
|
||||
"thinking",
|
||||
"reasoning",
|
||||
"thought",
|
||||
"REASONING_SCRATCHPAD",
|
||||
)
|
||||
|
||||
# Materialise literal tag strings so the hot path does string
|
||||
# operations, not regex compilation per feed().
|
||||
_OPEN_TAGS: Tuple[str, ...] = tuple(f"<{name}>" for name in _OPEN_TAG_NAMES)
|
||||
_CLOSE_TAGS: Tuple[str, ...] = tuple(f"</{name}>" for name in _OPEN_TAG_NAMES)
|
||||
|
||||
# Pre-compute the longest tag (for partial-tag hold-back bound).
|
||||
_MAX_TAG_LEN: int = max(len(tag) for tag in _OPEN_TAGS + _CLOSE_TAGS)
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._in_block: bool = False
|
||||
self._buf: str = ""
|
||||
self._last_emitted_ended_newline: bool = True
|
||||
|
||||
def reset(self) -> None:
|
||||
"""Reset all state. Call at the top of every new turn."""
|
||||
self._in_block = False
|
||||
self._buf = ""
|
||||
self._last_emitted_ended_newline = True
|
||||
|
||||
def feed(self, text: str) -> str:
|
||||
"""Feed one delta; return the scrubbed visible portion.
|
||||
|
||||
May return an empty string when the entire delta is reasoning
|
||||
content or is being held back pending resolution of a partial
|
||||
tag at the boundary.
|
||||
"""
|
||||
if not text:
|
||||
return ""
|
||||
buf = self._buf + text
|
||||
self._buf = ""
|
||||
out: list[str] = []
|
||||
|
||||
while buf:
|
||||
if self._in_block:
|
||||
# Hunt for the earliest close tag.
|
||||
close_idx, close_len = self._find_first_tag(
|
||||
buf, self._CLOSE_TAGS,
|
||||
)
|
||||
if close_idx == -1:
|
||||
# No close yet — hold back a potential partial
|
||||
# close-tag prefix; discard everything else.
|
||||
held = self._max_partial_suffix(buf, self._CLOSE_TAGS)
|
||||
self._buf = buf[-held:] if held else ""
|
||||
return "".join(out)
|
||||
# Found close: discard block content + tag, continue.
|
||||
buf = buf[close_idx + close_len:]
|
||||
self._in_block = False
|
||||
else:
|
||||
# Priority 1 — closed <tag>X</tag> pair anywhere in
|
||||
# buf. Closed pairs are always an intentional,
|
||||
# bounded construct (even mid-line prose containing
|
||||
# an open/close pair is almost certainly a model
|
||||
# leaking reasoning inline), so no boundary gating.
|
||||
pair = self._find_earliest_closed_pair(buf)
|
||||
# Priority 2 — unterminated open tag at a block
|
||||
# boundary. Boundary-gated so prose that mentions
|
||||
# '<think>' isn't over-stripped.
|
||||
open_idx, open_len = self._find_open_at_boundary(
|
||||
buf, out,
|
||||
)
|
||||
|
||||
# Pick whichever match comes earliest in the buffer.
|
||||
if pair is not None and (
|
||||
open_idx == -1 or pair[0] <= open_idx
|
||||
):
|
||||
start_idx, end_idx = pair
|
||||
preceding = buf[:start_idx]
|
||||
if preceding:
|
||||
preceding = self._strip_orphan_close_tags(preceding)
|
||||
if preceding:
|
||||
out.append(preceding)
|
||||
self._last_emitted_ended_newline = (
|
||||
preceding.endswith("\n")
|
||||
)
|
||||
buf = buf[end_idx:]
|
||||
continue
|
||||
|
||||
if open_idx != -1:
|
||||
# Unterminated open at boundary — emit preceding,
|
||||
# enter block, continue loop with remainder.
|
||||
preceding = buf[:open_idx]
|
||||
if preceding:
|
||||
preceding = self._strip_orphan_close_tags(preceding)
|
||||
if preceding:
|
||||
out.append(preceding)
|
||||
self._last_emitted_ended_newline = (
|
||||
preceding.endswith("\n")
|
||||
)
|
||||
self._in_block = True
|
||||
buf = buf[open_idx + open_len:]
|
||||
continue
|
||||
|
||||
# No resolvable tag structure in buf. Hold back any
|
||||
# partial-tag prefix at the tail so a split tag
|
||||
# across deltas isn't missed, then emit the rest.
|
||||
held = self._max_partial_suffix(buf, self._OPEN_TAGS)
|
||||
held_close = self._max_partial_suffix(
|
||||
buf, self._CLOSE_TAGS,
|
||||
)
|
||||
held = max(held, held_close)
|
||||
if held:
|
||||
emit_text = buf[:-held]
|
||||
self._buf = buf[-held:]
|
||||
else:
|
||||
emit_text = buf
|
||||
self._buf = ""
|
||||
if emit_text:
|
||||
emit_text = self._strip_orphan_close_tags(emit_text)
|
||||
if emit_text:
|
||||
out.append(emit_text)
|
||||
self._last_emitted_ended_newline = (
|
||||
emit_text.endswith("\n")
|
||||
)
|
||||
return "".join(out)
|
||||
|
||||
return "".join(out)
|
||||
|
||||
def flush(self) -> str:
|
||||
"""End-of-stream flush.
|
||||
|
||||
If still inside an unterminated block, held-back content is
|
||||
discarded — leaking partial reasoning is worse than a
|
||||
truncated answer. Otherwise the held-back partial-tag tail is
|
||||
emitted verbatim (it turned out not to be a real tag prefix).
|
||||
"""
|
||||
if self._in_block:
|
||||
self._buf = ""
|
||||
self._in_block = False
|
||||
return ""
|
||||
tail = self._buf
|
||||
self._buf = ""
|
||||
if not tail:
|
||||
return ""
|
||||
tail = self._strip_orphan_close_tags(tail)
|
||||
if tail:
|
||||
self._last_emitted_ended_newline = tail.endswith("\n")
|
||||
return tail
|
||||
|
||||
# ── internal helpers ───────────────────────────────────────────────
|
||||
|
||||
@staticmethod
|
||||
def _find_first_tag(
|
||||
buf: str, tags: Tuple[str, ...],
|
||||
) -> Tuple[int, int]:
|
||||
"""Return (earliest_index, tag_length) over *tags*, or (-1, 0).
|
||||
|
||||
Case-insensitive match.
|
||||
"""
|
||||
buf_lower = buf.lower()
|
||||
best_idx = -1
|
||||
best_len = 0
|
||||
for tag in tags:
|
||||
idx = buf_lower.find(tag.lower())
|
||||
if idx != -1 and (best_idx == -1 or idx < best_idx):
|
||||
best_idx = idx
|
||||
best_len = len(tag)
|
||||
return best_idx, best_len
|
||||
|
||||
def _find_earliest_closed_pair(self, buf: str):
|
||||
"""Return (start_idx, end_idx) of the earliest closed pair, else None.
|
||||
|
||||
A closed pair is ``<tag>...</tag>`` of any variant. Matches are
|
||||
case-insensitive and non-greedy (the closest close tag after
|
||||
an open tag wins), matching the regex ``<tag>.*?</tag>``
|
||||
semantics of ``_strip_think_blocks`` case 1. When two tag
|
||||
variants could both match, the one whose open tag appears
|
||||
earlier wins.
|
||||
"""
|
||||
buf_lower = buf.lower()
|
||||
best: "tuple[int, int] | None" = None
|
||||
for open_tag, close_tag in zip(self._OPEN_TAGS, self._CLOSE_TAGS):
|
||||
open_lower = open_tag.lower()
|
||||
close_lower = close_tag.lower()
|
||||
open_idx = buf_lower.find(open_lower)
|
||||
if open_idx == -1:
|
||||
continue
|
||||
close_idx = buf_lower.find(
|
||||
close_lower, open_idx + len(open_lower),
|
||||
)
|
||||
if close_idx == -1:
|
||||
continue
|
||||
end_idx = close_idx + len(close_lower)
|
||||
if best is None or open_idx < best[0]:
|
||||
best = (open_idx, end_idx)
|
||||
return best
|
||||
|
||||
def _find_open_at_boundary(
|
||||
self, buf: str, already_emitted: list[str],
|
||||
) -> Tuple[int, int]:
|
||||
"""Return the earliest block-boundary open-tag (idx, len).
|
||||
|
||||
Returns (-1, 0) if no boundary-legal opener is present.
|
||||
"""
|
||||
buf_lower = buf.lower()
|
||||
best_idx = -1
|
||||
best_len = 0
|
||||
for tag in self._OPEN_TAGS:
|
||||
tag_lower = tag.lower()
|
||||
search_start = 0
|
||||
while True:
|
||||
idx = buf_lower.find(tag_lower, search_start)
|
||||
if idx == -1:
|
||||
break
|
||||
if self._is_block_boundary(buf, idx, already_emitted):
|
||||
if best_idx == -1 or idx < best_idx:
|
||||
best_idx = idx
|
||||
best_len = len(tag)
|
||||
break # first boundary hit for this tag is enough
|
||||
search_start = idx + 1
|
||||
return best_idx, best_len
|
||||
|
||||
def _is_block_boundary(
|
||||
self, buf: str, idx: int, already_emitted: list[str],
|
||||
) -> bool:
|
||||
"""True iff position *idx* in *buf* is a block boundary.
|
||||
|
||||
A block boundary is:
|
||||
- buf position 0 AND the most recent emission ended with
|
||||
a newline (or nothing has been emitted yet)
|
||||
- any position whose preceding text on the current line
|
||||
(since the last newline in buf) is whitespace-only, AND
|
||||
if there is no newline in the preceding buf portion, the
|
||||
most recent prior emission ended with a newline
|
||||
"""
|
||||
if idx == 0:
|
||||
# Check whether the last already-emitted chunk in THIS
|
||||
# feed() call ended with a newline, otherwise fall back
|
||||
# to the cross-feed flag.
|
||||
if already_emitted:
|
||||
return already_emitted[-1].endswith("\n")
|
||||
return self._last_emitted_ended_newline
|
||||
preceding = buf[:idx]
|
||||
last_nl = preceding.rfind("\n")
|
||||
if last_nl == -1:
|
||||
# No newline in buf before the tag — boundary only if the
|
||||
# prior emission ended with a newline AND everything since
|
||||
# is whitespace.
|
||||
if already_emitted:
|
||||
prior_newline = already_emitted[-1].endswith("\n")
|
||||
else:
|
||||
prior_newline = self._last_emitted_ended_newline
|
||||
return prior_newline and preceding.strip() == ""
|
||||
# Newline present — text between it and the tag must be
|
||||
# whitespace-only.
|
||||
return preceding[last_nl + 1:].strip() == ""
|
||||
|
||||
@classmethod
|
||||
def _max_partial_suffix(
|
||||
cls, buf: str, tags: Tuple[str, ...],
|
||||
) -> int:
|
||||
"""Return the longest buf-suffix that is a prefix of any tag.
|
||||
|
||||
Only prefixes strictly shorter than the tag itself count
|
||||
(full-length suffixes are the tag and are handled as matches,
|
||||
not held-back partials). Case-insensitive.
|
||||
"""
|
||||
if not buf:
|
||||
return 0
|
||||
buf_lower = buf.lower()
|
||||
max_check = min(len(buf_lower), cls._MAX_TAG_LEN - 1)
|
||||
for i in range(max_check, 0, -1):
|
||||
suffix = buf_lower[-i:]
|
||||
for tag in tags:
|
||||
tag_lower = tag.lower()
|
||||
if len(tag_lower) > i and tag_lower.startswith(suffix):
|
||||
return i
|
||||
return 0
|
||||
|
||||
@classmethod
|
||||
def _strip_orphan_close_tags(cls, text: str) -> str:
|
||||
"""Remove any close tags from *text* (orphan-close handling).
|
||||
|
||||
An orphan close tag has no matching open in the current
|
||||
scrubber state; it's always noise, stripped with any trailing
|
||||
whitespace so the surrounding prose flows naturally.
|
||||
"""
|
||||
if "</" not in text:
|
||||
return text
|
||||
text_lower = text.lower()
|
||||
out: list[str] = []
|
||||
i = 0
|
||||
while i < len(text):
|
||||
matched = False
|
||||
if text_lower[i:i + 2] == "</":
|
||||
for tag in cls._CLOSE_TAGS:
|
||||
tag_lower = tag.lower()
|
||||
tag_len = len(tag_lower)
|
||||
if text_lower[i:i + tag_len] == tag_lower:
|
||||
# Skip the tag and any trailing whitespace,
|
||||
# matching _strip_think_blocks case 3.
|
||||
j = i + tag_len
|
||||
while j < len(text) and text[j] in " \t\n\r":
|
||||
j += 1
|
||||
i = j
|
||||
matched = True
|
||||
break
|
||||
if not matched:
|
||||
out.append(text[i])
|
||||
i += 1
|
||||
return "".join(out)
|
||||
@@ -6,19 +6,12 @@ adds latency to the user-facing reply.
|
||||
|
||||
import logging
|
||||
import threading
|
||||
from typing import Callable, Optional
|
||||
from typing import Optional
|
||||
|
||||
from agent.auxiliary_client import call_llm
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Callback signature: (task_name, exception) -> None. Used to surface
|
||||
# auxiliary failures to the user through AIAgent._emit_auxiliary_failure
|
||||
# so silent-drops (e.g. OpenRouter 402 exhausting the fallback chain)
|
||||
# become visible instead of piling up as NULL session titles.
|
||||
FailureCallback = Callable[[str, BaseException], None]
|
||||
TitleCallback = Callable[[str], None]
|
||||
|
||||
_TITLE_PROMPT = (
|
||||
"Generate a short, descriptive title (3-7 words) for a conversation that starts with the "
|
||||
"following exchange. The title should capture the main topic or intent. "
|
||||
@@ -26,23 +19,11 @@ _TITLE_PROMPT = (
|
||||
)
|
||||
|
||||
|
||||
def generate_title(
|
||||
user_message: str,
|
||||
assistant_response: str,
|
||||
timeout: float = 30.0,
|
||||
failure_callback: Optional[FailureCallback] = None,
|
||||
main_runtime: dict = None,
|
||||
) -> Optional[str]:
|
||||
def generate_title(user_message: str, assistant_response: str, timeout: float = 30.0) -> Optional[str]:
|
||||
"""Generate a session title from the first exchange.
|
||||
|
||||
Uses the main runtime's model when available, falling back to the
|
||||
auxiliary LLM client (cheapest/fastest available model).
|
||||
Uses the auxiliary LLM client (cheapest/fastest available model).
|
||||
Returns the title string or None on failure.
|
||||
|
||||
``failure_callback`` is invoked with ``(task, exception)`` when the
|
||||
auxiliary call raises — the caller typically wires this to
|
||||
``AIAgent._emit_auxiliary_failure`` so the user sees a warning instead
|
||||
of silently accumulating untitled sessions.
|
||||
"""
|
||||
# Truncate long messages to keep the request small
|
||||
user_snippet = user_message[:500] if user_message else ""
|
||||
@@ -55,12 +36,11 @@ def generate_title(
|
||||
|
||||
try:
|
||||
response = call_llm(
|
||||
task="title_generation",
|
||||
task="compression", # reuse compression task config (cheap/fast model)
|
||||
messages=messages,
|
||||
max_tokens=500,
|
||||
max_tokens=30,
|
||||
temperature=0.3,
|
||||
timeout=timeout,
|
||||
main_runtime=main_runtime,
|
||||
)
|
||||
title = (response.choices[0].message.content or "").strip()
|
||||
# Clean up: remove quotes, trailing punctuation, prefixes like "Title: "
|
||||
@@ -72,15 +52,7 @@ def generate_title(
|
||||
title = title[:77] + "..."
|
||||
return title if title else None
|
||||
except Exception as e:
|
||||
# Log at WARNING so this shows up in agent.log without debug mode.
|
||||
# Full detail at debug level for operators who need the stack.
|
||||
logger.warning("Title generation failed: %s", e)
|
||||
logger.debug("Title generation traceback", exc_info=True)
|
||||
if failure_callback is not None:
|
||||
try:
|
||||
failure_callback("title generation", e)
|
||||
except Exception:
|
||||
logger.debug("Title generation failure_callback raised", exc_info=True)
|
||||
logger.debug("Title generation failed: %s", e)
|
||||
return None
|
||||
|
||||
|
||||
@@ -89,9 +61,6 @@ def auto_title_session(
|
||||
session_id: str,
|
||||
user_message: str,
|
||||
assistant_response: str,
|
||||
failure_callback: Optional[FailureCallback] = None,
|
||||
main_runtime: dict = None,
|
||||
title_callback: Optional[TitleCallback] = None,
|
||||
) -> None:
|
||||
"""Generate and set a session title if one doesn't already exist.
|
||||
|
||||
@@ -112,20 +81,13 @@ def auto_title_session(
|
||||
except Exception:
|
||||
return
|
||||
|
||||
title = generate_title(
|
||||
user_message, assistant_response, failure_callback=failure_callback, main_runtime=main_runtime
|
||||
)
|
||||
title = generate_title(user_message, assistant_response)
|
||||
if not title:
|
||||
return
|
||||
|
||||
try:
|
||||
session_db.set_session_title(session_id, title)
|
||||
logger.debug("Auto-generated session title: %s", title)
|
||||
if title_callback is not None:
|
||||
try:
|
||||
title_callback(title)
|
||||
except Exception:
|
||||
logger.debug("Auto-title callback failed", exc_info=True)
|
||||
except Exception as e:
|
||||
logger.debug("Failed to set auto-generated title: %s", e)
|
||||
|
||||
@@ -136,9 +98,6 @@ def maybe_auto_title(
|
||||
user_message: str,
|
||||
assistant_response: str,
|
||||
conversation_history: list,
|
||||
failure_callback: Optional[FailureCallback] = None,
|
||||
main_runtime: dict = None,
|
||||
title_callback: Optional[TitleCallback] = None,
|
||||
) -> None:
|
||||
"""Fire-and-forget title generation after the first exchange.
|
||||
|
||||
@@ -160,11 +119,6 @@ def maybe_auto_title(
|
||||
thread = threading.Thread(
|
||||
target=auto_title_session,
|
||||
args=(session_db, session_id, user_message, assistant_response),
|
||||
kwargs={
|
||||
"failure_callback": failure_callback,
|
||||
"main_runtime": main_runtime,
|
||||
"title_callback": title_callback,
|
||||
},
|
||||
daemon=True,
|
||||
name="auto-title",
|
||||
)
|
||||
|
||||
@@ -1,455 +0,0 @@
|
||||
"""Pure tool-call loop guardrail primitives.
|
||||
|
||||
The controller in this module is intentionally side-effect free: it tracks
|
||||
per-turn tool-call observations and returns decisions. Runtime code owns whether
|
||||
those decisions become warning guidance, synthetic tool results, or controlled
|
||||
turn halts.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any, Mapping
|
||||
|
||||
from utils import safe_json_loads
|
||||
|
||||
|
||||
IDEMPOTENT_TOOL_NAMES = frozenset(
|
||||
{
|
||||
"read_file",
|
||||
"search_files",
|
||||
"web_search",
|
||||
"web_extract",
|
||||
"session_search",
|
||||
"browser_snapshot",
|
||||
"browser_console",
|
||||
"browser_get_images",
|
||||
"mcp_filesystem_read_file",
|
||||
"mcp_filesystem_read_text_file",
|
||||
"mcp_filesystem_read_multiple_files",
|
||||
"mcp_filesystem_list_directory",
|
||||
"mcp_filesystem_list_directory_with_sizes",
|
||||
"mcp_filesystem_directory_tree",
|
||||
"mcp_filesystem_get_file_info",
|
||||
"mcp_filesystem_search_files",
|
||||
}
|
||||
)
|
||||
|
||||
MUTATING_TOOL_NAMES = frozenset(
|
||||
{
|
||||
"terminal",
|
||||
"execute_code",
|
||||
"write_file",
|
||||
"patch",
|
||||
"todo",
|
||||
"memory",
|
||||
"skill_manage",
|
||||
"browser_click",
|
||||
"browser_type",
|
||||
"browser_press",
|
||||
"browser_scroll",
|
||||
"browser_navigate",
|
||||
"send_message",
|
||||
"cronjob",
|
||||
"delegate_task",
|
||||
"process",
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class ToolCallGuardrailConfig:
|
||||
"""Thresholds for per-turn tool-call loop detection.
|
||||
|
||||
Warnings are enabled by default and never prevent tool execution. Hard stops
|
||||
are explicit opt-in so interactive CLI/TUI sessions get a gentle nudge unless
|
||||
the user enables circuit-breaker behavior in config.yaml.
|
||||
"""
|
||||
|
||||
warnings_enabled: bool = True
|
||||
hard_stop_enabled: bool = False
|
||||
exact_failure_warn_after: int = 2
|
||||
exact_failure_block_after: int = 5
|
||||
same_tool_failure_warn_after: int = 3
|
||||
same_tool_failure_halt_after: int = 8
|
||||
no_progress_warn_after: int = 2
|
||||
no_progress_block_after: int = 5
|
||||
idempotent_tools: frozenset[str] = field(default_factory=lambda: IDEMPOTENT_TOOL_NAMES)
|
||||
mutating_tools: frozenset[str] = field(default_factory=lambda: MUTATING_TOOL_NAMES)
|
||||
|
||||
@classmethod
|
||||
def from_mapping(cls, data: Mapping[str, Any] | None) -> "ToolCallGuardrailConfig":
|
||||
"""Build config from the `tool_loop_guardrails` config.yaml section."""
|
||||
if not isinstance(data, Mapping):
|
||||
return cls()
|
||||
|
||||
warn_after = data.get("warn_after")
|
||||
if not isinstance(warn_after, Mapping):
|
||||
warn_after = {}
|
||||
hard_stop_after = data.get("hard_stop_after")
|
||||
if not isinstance(hard_stop_after, Mapping):
|
||||
hard_stop_after = {}
|
||||
|
||||
defaults = cls()
|
||||
return cls(
|
||||
warnings_enabled=_as_bool(data.get("warnings_enabled"), defaults.warnings_enabled),
|
||||
hard_stop_enabled=_as_bool(data.get("hard_stop_enabled"), defaults.hard_stop_enabled),
|
||||
exact_failure_warn_after=_positive_int(
|
||||
warn_after.get("exact_failure", data.get("exact_failure_warn_after")),
|
||||
defaults.exact_failure_warn_after,
|
||||
),
|
||||
same_tool_failure_warn_after=_positive_int(
|
||||
warn_after.get("same_tool_failure", data.get("same_tool_failure_warn_after")),
|
||||
defaults.same_tool_failure_warn_after,
|
||||
),
|
||||
no_progress_warn_after=_positive_int(
|
||||
warn_after.get("idempotent_no_progress", data.get("no_progress_warn_after")),
|
||||
defaults.no_progress_warn_after,
|
||||
),
|
||||
exact_failure_block_after=_positive_int(
|
||||
hard_stop_after.get("exact_failure", data.get("exact_failure_block_after")),
|
||||
defaults.exact_failure_block_after,
|
||||
),
|
||||
same_tool_failure_halt_after=_positive_int(
|
||||
hard_stop_after.get("same_tool_failure", data.get("same_tool_failure_halt_after")),
|
||||
defaults.same_tool_failure_halt_after,
|
||||
),
|
||||
no_progress_block_after=_positive_int(
|
||||
hard_stop_after.get("idempotent_no_progress", data.get("no_progress_block_after")),
|
||||
defaults.no_progress_block_after,
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class ToolCallSignature:
|
||||
"""Stable, non-reversible identity for a tool name plus canonical args."""
|
||||
|
||||
tool_name: str
|
||||
args_hash: str
|
||||
|
||||
@classmethod
|
||||
def from_call(cls, tool_name: str, args: Mapping[str, Any] | None) -> "ToolCallSignature":
|
||||
canonical = canonical_tool_args(args or {})
|
||||
return cls(tool_name=tool_name, args_hash=_sha256(canonical))
|
||||
|
||||
def to_metadata(self) -> dict[str, str]:
|
||||
"""Return public metadata without raw argument values."""
|
||||
return {"tool_name": self.tool_name, "args_hash": self.args_hash}
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class ToolGuardrailDecision:
|
||||
"""Decision returned by the tool-call guardrail controller."""
|
||||
|
||||
action: str = "allow" # allow | warn | block | halt
|
||||
code: str = "allow"
|
||||
message: str = ""
|
||||
tool_name: str = ""
|
||||
count: int = 0
|
||||
signature: ToolCallSignature | None = None
|
||||
|
||||
@property
|
||||
def allows_execution(self) -> bool:
|
||||
return self.action in {"allow", "warn"}
|
||||
|
||||
@property
|
||||
def should_halt(self) -> bool:
|
||||
return self.action in {"block", "halt"}
|
||||
|
||||
def to_metadata(self) -> dict[str, Any]:
|
||||
data: dict[str, Any] = {
|
||||
"action": self.action,
|
||||
"code": self.code,
|
||||
"message": self.message,
|
||||
"tool_name": self.tool_name,
|
||||
"count": self.count,
|
||||
}
|
||||
if self.signature is not None:
|
||||
data["signature"] = self.signature.to_metadata()
|
||||
return data
|
||||
|
||||
|
||||
def canonical_tool_args(args: Mapping[str, Any]) -> str:
|
||||
"""Return sorted compact JSON for parsed tool arguments."""
|
||||
if not isinstance(args, Mapping):
|
||||
raise TypeError(f"tool args must be a mapping, got {type(args).__name__}")
|
||||
return json.dumps(
|
||||
args,
|
||||
ensure_ascii=False,
|
||||
sort_keys=True,
|
||||
separators=(",", ":"),
|
||||
default=str,
|
||||
)
|
||||
|
||||
|
||||
def classify_tool_failure(tool_name: str, result: str | None) -> tuple[bool, str]:
|
||||
"""Safety-fallback classifier used only when callers don't pass ``failed``.
|
||||
|
||||
Mirrors ``agent.display._detect_tool_failure`` exactly so the guardrail
|
||||
never disagrees with the CLI's user-visible ``[error]`` tag. Production
|
||||
callers in ``run_agent.py`` always pass an explicit ``failed=`` derived
|
||||
from ``_detect_tool_failure``; this function exists so standalone callers
|
||||
(tests, tooling) still get consistent behavior.
|
||||
"""
|
||||
if result is None:
|
||||
return False, ""
|
||||
|
||||
if tool_name == "terminal":
|
||||
data = safe_json_loads(result)
|
||||
if isinstance(data, dict):
|
||||
exit_code = data.get("exit_code")
|
||||
if exit_code is not None and exit_code != 0:
|
||||
return True, f" [exit {exit_code}]"
|
||||
return False, ""
|
||||
|
||||
if tool_name == "memory":
|
||||
data = safe_json_loads(result)
|
||||
if isinstance(data, dict):
|
||||
if data.get("success") is False and "exceed the limit" in data.get("error", ""):
|
||||
return True, " [full]"
|
||||
|
||||
lower = result[:500].lower()
|
||||
if '"error"' in lower or '"failed"' in lower or result.startswith("Error"):
|
||||
return True, " [error]"
|
||||
|
||||
return False, ""
|
||||
|
||||
|
||||
class ToolCallGuardrailController:
|
||||
"""Per-turn controller for repeated failed/non-progressing tool calls."""
|
||||
|
||||
def __init__(self, config: ToolCallGuardrailConfig | None = None):
|
||||
self.config = config or ToolCallGuardrailConfig()
|
||||
self.reset_for_turn()
|
||||
|
||||
def reset_for_turn(self) -> None:
|
||||
self._exact_failure_counts: dict[ToolCallSignature, int] = {}
|
||||
self._same_tool_failure_counts: dict[str, int] = {}
|
||||
self._no_progress: dict[ToolCallSignature, tuple[str, int]] = {}
|
||||
self._halt_decision: ToolGuardrailDecision | None = None
|
||||
|
||||
@property
|
||||
def halt_decision(self) -> ToolGuardrailDecision | None:
|
||||
return self._halt_decision
|
||||
|
||||
def before_call(self, tool_name: str, args: Mapping[str, Any] | None) -> ToolGuardrailDecision:
|
||||
signature = ToolCallSignature.from_call(tool_name, _coerce_args(args))
|
||||
if not self.config.hard_stop_enabled:
|
||||
return ToolGuardrailDecision(tool_name=tool_name, signature=signature)
|
||||
|
||||
exact_count = self._exact_failure_counts.get(signature, 0)
|
||||
if exact_count >= self.config.exact_failure_block_after:
|
||||
decision = ToolGuardrailDecision(
|
||||
action="block",
|
||||
code="repeated_exact_failure_block",
|
||||
message=(
|
||||
f"Blocked {tool_name}: the same tool call failed {exact_count} "
|
||||
"times with identical arguments. Stop retrying it unchanged; "
|
||||
"change strategy or explain the blocker."
|
||||
),
|
||||
tool_name=tool_name,
|
||||
count=exact_count,
|
||||
signature=signature,
|
||||
)
|
||||
self._halt_decision = decision
|
||||
return decision
|
||||
|
||||
if self._is_idempotent(tool_name):
|
||||
record = self._no_progress.get(signature)
|
||||
if record is not None:
|
||||
_result_hash, repeat_count = record
|
||||
if repeat_count >= self.config.no_progress_block_after:
|
||||
decision = ToolGuardrailDecision(
|
||||
action="block",
|
||||
code="idempotent_no_progress_block",
|
||||
message=(
|
||||
f"Blocked {tool_name}: this read-only call returned the same "
|
||||
f"result {repeat_count} times. Stop repeating it unchanged; "
|
||||
"use the result already provided or try a different query."
|
||||
),
|
||||
tool_name=tool_name,
|
||||
count=repeat_count,
|
||||
signature=signature,
|
||||
)
|
||||
self._halt_decision = decision
|
||||
return decision
|
||||
|
||||
return ToolGuardrailDecision(tool_name=tool_name, signature=signature)
|
||||
|
||||
def after_call(
|
||||
self,
|
||||
tool_name: str,
|
||||
args: Mapping[str, Any] | None,
|
||||
result: str | None,
|
||||
*,
|
||||
failed: bool | None = None,
|
||||
) -> ToolGuardrailDecision:
|
||||
args = _coerce_args(args)
|
||||
signature = ToolCallSignature.from_call(tool_name, args)
|
||||
if failed is None:
|
||||
failed, _ = classify_tool_failure(tool_name, result)
|
||||
|
||||
if failed:
|
||||
exact_count = self._exact_failure_counts.get(signature, 0) + 1
|
||||
self._exact_failure_counts[signature] = exact_count
|
||||
self._no_progress.pop(signature, None)
|
||||
|
||||
same_count = self._same_tool_failure_counts.get(tool_name, 0) + 1
|
||||
self._same_tool_failure_counts[tool_name] = same_count
|
||||
|
||||
if self.config.hard_stop_enabled and same_count >= self.config.same_tool_failure_halt_after:
|
||||
decision = ToolGuardrailDecision(
|
||||
action="halt",
|
||||
code="same_tool_failure_halt",
|
||||
message=(
|
||||
f"Stopped {tool_name}: it failed {same_count} times this turn. "
|
||||
"Stop retrying the same failing tool path and choose a different approach."
|
||||
),
|
||||
tool_name=tool_name,
|
||||
count=same_count,
|
||||
signature=signature,
|
||||
)
|
||||
self._halt_decision = decision
|
||||
return decision
|
||||
|
||||
if self.config.warnings_enabled and exact_count >= self.config.exact_failure_warn_after:
|
||||
return ToolGuardrailDecision(
|
||||
action="warn",
|
||||
code="repeated_exact_failure_warning",
|
||||
message=(
|
||||
f"{tool_name} has failed {exact_count} times with identical arguments. "
|
||||
"This looks like a loop; inspect the error and change strategy "
|
||||
"instead of retrying it unchanged."
|
||||
),
|
||||
tool_name=tool_name,
|
||||
count=exact_count,
|
||||
signature=signature,
|
||||
)
|
||||
|
||||
if self.config.warnings_enabled and same_count >= self.config.same_tool_failure_warn_after:
|
||||
return ToolGuardrailDecision(
|
||||
action="warn",
|
||||
code="same_tool_failure_warning",
|
||||
message=(
|
||||
f"{tool_name} has failed {same_count} times this turn. "
|
||||
"This looks like a loop; change approach before retrying."
|
||||
),
|
||||
tool_name=tool_name,
|
||||
count=same_count,
|
||||
signature=signature,
|
||||
)
|
||||
|
||||
return ToolGuardrailDecision(tool_name=tool_name, count=exact_count, signature=signature)
|
||||
|
||||
self._exact_failure_counts.pop(signature, None)
|
||||
self._same_tool_failure_counts.pop(tool_name, None)
|
||||
|
||||
if not self._is_idempotent(tool_name):
|
||||
self._no_progress.pop(signature, None)
|
||||
return ToolGuardrailDecision(tool_name=tool_name, signature=signature)
|
||||
|
||||
result_hash = _result_hash(result)
|
||||
previous = self._no_progress.get(signature)
|
||||
repeat_count = 1
|
||||
if previous is not None and previous[0] == result_hash:
|
||||
repeat_count = previous[1] + 1
|
||||
self._no_progress[signature] = (result_hash, repeat_count)
|
||||
|
||||
if self.config.warnings_enabled and repeat_count >= self.config.no_progress_warn_after:
|
||||
return ToolGuardrailDecision(
|
||||
action="warn",
|
||||
code="idempotent_no_progress_warning",
|
||||
message=(
|
||||
f"{tool_name} returned the same result {repeat_count} times. "
|
||||
"Use the result already provided or change the query instead of "
|
||||
"repeating it unchanged."
|
||||
),
|
||||
tool_name=tool_name,
|
||||
count=repeat_count,
|
||||
signature=signature,
|
||||
)
|
||||
|
||||
return ToolGuardrailDecision(tool_name=tool_name, count=repeat_count, signature=signature)
|
||||
|
||||
def _is_idempotent(self, tool_name: str) -> bool:
|
||||
if tool_name in self.config.mutating_tools:
|
||||
return False
|
||||
return tool_name in self.config.idempotent_tools
|
||||
|
||||
|
||||
def toolguard_synthetic_result(decision: ToolGuardrailDecision) -> str:
|
||||
"""Build a synthetic role=tool content string for a blocked tool call."""
|
||||
return json.dumps(
|
||||
{
|
||||
"error": decision.message,
|
||||
"guardrail": decision.to_metadata(),
|
||||
},
|
||||
ensure_ascii=False,
|
||||
)
|
||||
|
||||
|
||||
def append_toolguard_guidance(result: str, decision: ToolGuardrailDecision) -> str:
|
||||
"""Append runtime guidance to the current tool result content."""
|
||||
if decision.action not in {"warn", "halt"} or not decision.message:
|
||||
return result
|
||||
label = "Tool loop hard stop" if decision.action == "halt" else "Tool loop warning"
|
||||
suffix = (
|
||||
f"\n\n[{label}: "
|
||||
f"{decision.code}; count={decision.count}; {decision.message}]"
|
||||
)
|
||||
return (result or "") + suffix
|
||||
|
||||
|
||||
def _coerce_args(args: Mapping[str, Any] | None) -> Mapping[str, Any]:
|
||||
return args if isinstance(args, Mapping) else {}
|
||||
|
||||
|
||||
def _result_hash(result: str | None) -> str:
|
||||
parsed = safe_json_loads(result or "")
|
||||
if parsed is not None:
|
||||
try:
|
||||
canonical = json.dumps(
|
||||
parsed,
|
||||
ensure_ascii=False,
|
||||
sort_keys=True,
|
||||
separators=(",", ":"),
|
||||
default=str,
|
||||
)
|
||||
except TypeError:
|
||||
canonical = str(parsed)
|
||||
else:
|
||||
canonical = result or ""
|
||||
return _sha256(canonical)
|
||||
|
||||
|
||||
def _as_bool(value: Any, default: bool) -> bool:
|
||||
if value is None:
|
||||
return default
|
||||
if isinstance(value, bool):
|
||||
return value
|
||||
if isinstance(value, (int, float)):
|
||||
return bool(value)
|
||||
if isinstance(value, str):
|
||||
lowered = value.strip().lower()
|
||||
if lowered in {"1", "true", "yes", "on", "enabled"}:
|
||||
return True
|
||||
if lowered in {"0", "false", "no", "off", "disabled"}:
|
||||
return False
|
||||
return default
|
||||
|
||||
|
||||
def _positive_int(value: Any, default: int) -> int:
|
||||
if value is None:
|
||||
return default
|
||||
try:
|
||||
parsed = int(value)
|
||||
except (TypeError, ValueError):
|
||||
return default
|
||||
return parsed if parsed >= 1 else default
|
||||
|
||||
|
||||
def _sha256(value: str) -> str:
|
||||
return hashlib.sha256(value.encode("utf-8")).hexdigest()
|
||||
@@ -1,68 +0,0 @@
"""Transport layer types and registry for provider response normalization.

Usage:
    from agent.transports import get_transport
    transport = get_transport("anthropic_messages")
    result = transport.normalize_response(raw_response)
"""

from agent.transports.types import (
    NormalizedResponse,
    ToolCall,
    Usage,
    build_tool_call,
    map_finish_reason,
)  # noqa: F401

_REGISTRY: dict = {}
_discovered: bool = False


def register_transport(api_mode: str, transport_cls: type) -> None:
    """Register a transport class for an api_mode string."""
    _REGISTRY[api_mode] = transport_cls


def get_transport(api_mode: str):
    """Get a transport instance for the given api_mode.

    Returns None if no transport is registered for this api_mode.
    This allows gradual migration — call sites can check for None
    and fall back to the legacy code path.
    """
    global _discovered
    if not _discovered:
        _discover_transports()
    cls = _REGISTRY.get(api_mode)
    if cls is None:
        # The registry can be partially populated when a specific transport
        # module was imported directly (for example chat_completions before
        # codex). Discover on misses, not only when the registry is empty, so
        # test/order-dependent imports do not make valid api_modes unavailable.
        _discover_transports()
        cls = _REGISTRY.get(api_mode)
        if cls is None:
            return None
    return cls()


def _discover_transports() -> None:
    """Import all transport modules to trigger auto-registration."""
    global _discovered
    _discovered = True
    try:
        import agent.transports.anthropic  # noqa: F401
    except ImportError:
        pass
    try:
        import agent.transports.codex  # noqa: F401
    except ImportError:
        pass
    try:
        import agent.transports.chat_completions  # noqa: F401
    except ImportError:
        pass
    try:
        import agent.transports.bedrock  # noqa: F401
    except ImportError:
        pass
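get_transport() returning None is the hook for the gradual migration its docstring describes. A short sketch of that call-site shape; legacy_build_request_kwargs is a stand-in name for whatever the existing non-transport path is, not a function in the repo:

from agent.transports import get_transport

def build_request(api_mode, model, messages, tools):
    # Hedged sketch: only get_transport() is real; the legacy helper is assumed.
    transport = get_transport(api_mode)
    if transport is None:
        # This api_mode has not been migrated yet, keep the legacy code path.
        return legacy_build_request_kwargs(model, messages, tools)
    return transport.build_kwargs(model=model, messages=messages, tools=tools)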
@@ -1,179 +0,0 @@
"""Anthropic Messages API transport.

Delegates to the existing adapter functions in agent/anthropic_adapter.py.
This transport owns format conversion and normalization — NOT client lifecycle.
"""

from typing import Any, Dict, List, Optional

from agent.transports.base import ProviderTransport
from agent.transports.types import NormalizedResponse


class AnthropicTransport(ProviderTransport):
    """Transport for api_mode='anthropic_messages'.

    Wraps the existing functions in anthropic_adapter.py behind the
    ProviderTransport ABC. Each method delegates — no logic is duplicated.
    """

    @property
    def api_mode(self) -> str:
        return "anthropic_messages"

    def convert_messages(self, messages: List[Dict[str, Any]], **kwargs) -> Any:
        """Convert OpenAI messages to Anthropic (system, messages) tuple.

        kwargs:
            base_url: Optional[str] — affects thinking signature handling.
        """
        from agent.anthropic_adapter import convert_messages_to_anthropic

        base_url = kwargs.get("base_url")
        return convert_messages_to_anthropic(messages, base_url=base_url)

    def convert_tools(self, tools: List[Dict[str, Any]]) -> Any:
        """Convert OpenAI tool schemas to Anthropic input_schema format."""
        from agent.anthropic_adapter import convert_tools_to_anthropic

        return convert_tools_to_anthropic(tools)

    def build_kwargs(
        self,
        model: str,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        **params,
    ) -> Dict[str, Any]:
        """Build Anthropic messages.create() kwargs.

        Calls convert_messages and convert_tools internally.

        params (all optional):
            max_tokens: int
            reasoning_config: dict | None
            tool_choice: str | None
            is_oauth: bool
            preserve_dots: bool
            context_length: int | None
            base_url: str | None
            fast_mode: bool
            drop_context_1m_beta: bool
        """
        from agent.anthropic_adapter import build_anthropic_kwargs

        return build_anthropic_kwargs(
            model=model,
            messages=messages,
            tools=tools,
            max_tokens=params.get("max_tokens", 16384),
            reasoning_config=params.get("reasoning_config"),
            tool_choice=params.get("tool_choice"),
            is_oauth=params.get("is_oauth", False),
            preserve_dots=params.get("preserve_dots", False),
            context_length=params.get("context_length"),
            base_url=params.get("base_url"),
            fast_mode=params.get("fast_mode", False),
            drop_context_1m_beta=params.get("drop_context_1m_beta", False),
        )

    def normalize_response(self, response: Any, **kwargs) -> NormalizedResponse:
        """Normalize Anthropic response to NormalizedResponse.

        Parses content blocks (text, thinking, tool_use), maps stop_reason
        to OpenAI finish_reason, and collects reasoning_details in provider_data.
        """
        import json
        from agent.anthropic_adapter import _to_plain_data
        from agent.transports.types import ToolCall

        strip_tool_prefix = kwargs.get("strip_tool_prefix", False)
        _MCP_PREFIX = "mcp_"

        text_parts = []
        reasoning_parts = []
        reasoning_details = []
        tool_calls = []

        for block in response.content:
            if block.type == "text":
                text_parts.append(block.text)
            elif block.type == "thinking":
                reasoning_parts.append(block.thinking)
                block_dict = _to_plain_data(block)
                if isinstance(block_dict, dict):
                    reasoning_details.append(block_dict)
            elif block.type == "tool_use":
                name = block.name
                if strip_tool_prefix and name.startswith(_MCP_PREFIX):
                    name = name[len(_MCP_PREFIX):]
                tool_calls.append(
                    ToolCall(
                        id=block.id,
                        name=name,
                        arguments=json.dumps(block.input),
                    )
                )

        finish_reason = self._STOP_REASON_MAP.get(response.stop_reason, "stop")

        provider_data = {}
        if reasoning_details:
            provider_data["reasoning_details"] = reasoning_details

        return NormalizedResponse(
            content="\n".join(text_parts) if text_parts else None,
            tool_calls=tool_calls or None,
            finish_reason=finish_reason,
            reasoning="\n\n".join(reasoning_parts) if reasoning_parts else None,
            usage=None,
            provider_data=provider_data or None,
        )

    def validate_response(self, response: Any) -> bool:
        """Check Anthropic response structure is valid.

        An empty content list is legitimate when ``stop_reason == "end_turn"``
        — the model's canonical way of signalling "nothing more to add" after
        a tool turn that already delivered the user-facing text. Treating it
        as invalid falsely retries a completed response.
        """
        if response is None:
            return False
        content_blocks = getattr(response, "content", None)
        if not isinstance(content_blocks, list):
            return False
        if not content_blocks:
            return getattr(response, "stop_reason", None) == "end_turn"
        return True

    def extract_cache_stats(self, response: Any) -> Optional[Dict[str, int]]:
        """Extract Anthropic cache_read and cache_creation token counts."""
        usage = getattr(response, "usage", None)
        if usage is None:
            return None
        cached = getattr(usage, "cache_read_input_tokens", 0) or 0
        written = getattr(usage, "cache_creation_input_tokens", 0) or 0
        if cached or written:
            return {"cached_tokens": cached, "creation_tokens": written}
        return None

    # Promote the adapter's canonical mapping to module level so it's shared
    _STOP_REASON_MAP = {
        "end_turn": "stop",
        "tool_use": "tool_calls",
        "max_tokens": "length",
        "stop_sequence": "stop",
        "refusal": "content_filter",
        "model_context_window_exceeded": "length",
    }

    def map_finish_reason(self, raw_reason: str) -> str:
        """Map Anthropic stop_reason to OpenAI finish_reason."""
        return self._STOP_REASON_MAP.get(raw_reason, "stop")


# Auto-register on import
from agent.transports import register_transport  # noqa: E402

register_transport("anthropic_messages", AnthropicTransport)
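A hedged sketch of how a consumer might fold the NormalizedResponse produced above back into an OpenAI-style assistant message; the dict layout and the reasoning_content key are assumptions for illustration, not the agent's actual history format:

def to_assistant_message(norm):
    # Only the fields read here (content, tool_calls, finish_reason, reasoning)
    # appear in the transport above; everything else in this shape is assumed.
    message = {"role": "assistant", "content": norm.content or ""}
    if norm.tool_calls:
        message["tool_calls"] = [
            {
                "id": tc.id,
                "type": "function",
                "function": {"name": tc.name, "arguments": tc.arguments},
            }
            for tc in norm.tool_calls
        ]
    if norm.reasoning:
        message["reasoning_content"] = norm.reasoning  # assumed key name
    return message, norm.finish_reason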
@@ -1,89 +0,0 @@
"""Abstract base for provider transports.

A transport owns the data path for one api_mode:
    convert_messages → convert_tools → build_kwargs → normalize_response

It does NOT own: client construction, streaming, credential refresh,
prompt caching, interrupt handling, or retry logic. Those stay on AIAgent.
"""

from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional

from agent.transports.types import NormalizedResponse


class ProviderTransport(ABC):
    """Base class for provider-specific format conversion and normalization."""

    @property
    @abstractmethod
    def api_mode(self) -> str:
        """The api_mode string this transport handles (e.g. 'anthropic_messages')."""
        ...

    @abstractmethod
    def convert_messages(self, messages: List[Dict[str, Any]], **kwargs) -> Any:
        """Convert OpenAI-format messages to provider-native format.

        Returns provider-specific structure (e.g. (system, messages) for Anthropic,
        or the messages list unchanged for chat_completions).
        """
        ...

    @abstractmethod
    def convert_tools(self, tools: List[Dict[str, Any]]) -> Any:
        """Convert OpenAI-format tool definitions to provider-native format.

        Returns provider-specific tool list (e.g. Anthropic input_schema format).
        """
        ...

    @abstractmethod
    def build_kwargs(
        self,
        model: str,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        **params,
    ) -> Dict[str, Any]:
        """Build the complete API call kwargs dict.

        This is the primary entry point — it typically calls convert_messages()
        and convert_tools() internally, then adds model-specific config.

        Returns a dict ready to be passed to the provider's SDK client.
        """
        ...

    @abstractmethod
    def normalize_response(self, response: Any, **kwargs) -> NormalizedResponse:
        """Normalize a raw provider response to the shared NormalizedResponse type.

        This is the only method that returns a transport-layer type.
        """
        ...

    def validate_response(self, response: Any) -> bool:
        """Optional: check if the raw response is structurally valid.

        Returns True if valid, False if the response should be treated as invalid.
        Default implementation always returns True.
        """
        return True

    def extract_cache_stats(self, response: Any) -> Optional[Dict[str, int]]:
        """Optional: extract provider-specific cache hit/creation stats.

        Returns dict with 'cached_tokens' and 'creation_tokens', or None.
        Default returns None.
        """
        return None

    def map_finish_reason(self, raw_reason: str) -> str:
        """Optional: map provider-specific stop reason to OpenAI equivalent.

        Default returns the raw reason unchanged. Override for providers
        with different stop reason vocabularies.
        """
        return raw_reason
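With the ABC above plus the registry earlier in this diff, adding an api_mode means implementing the four abstract methods and auto-registering on import. A minimal sketch for a hypothetical passthrough mode; every name below except ProviderTransport, NormalizedResponse, and register_transport is invented for illustration:

from typing import Any, Dict, List, Optional

from agent.transports import register_transport
from agent.transports.base import ProviderTransport
from agent.transports.types import NormalizedResponse


class EchoPassthroughTransport(ProviderTransport):
    """Hypothetical transport for a provider that is already OpenAI-shaped."""

    @property
    def api_mode(self) -> str:
        return "echo_passthrough"  # not a real api_mode in the repo

    def convert_messages(self, messages: List[Dict[str, Any]], **kwargs) -> Any:
        return messages  # already OpenAI format, nothing to convert

    def convert_tools(self, tools: List[Dict[str, Any]]) -> Any:
        return tools

    def build_kwargs(
        self,
        model: str,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        **params,
    ) -> Dict[str, Any]:
        kwargs = {"model": model, "messages": self.convert_messages(messages)}
        if tools:
            kwargs["tools"] = self.convert_tools(tools)
        return kwargs

    def normalize_response(self, response: Any, **kwargs) -> NormalizedResponse:
        choice = response.choices[0]
        return NormalizedResponse(
            content=choice.message.content,
            tool_calls=None,
            finish_reason=choice.finish_reason or "stop",
            reasoning=None,
            usage=None,
        )


register_transport("echo_passthrough", EchoPassthroughTransport)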
@@ -1,154 +0,0 @@
|
||||
"""AWS Bedrock Converse API transport.
|
||||
|
||||
Delegates to the existing adapter functions in agent/bedrock_adapter.py.
|
||||
Bedrock uses its own boto3 client (not the OpenAI SDK), so the transport
|
||||
owns format conversion and normalization, while client construction and
|
||||
boto3 calls stay on AIAgent.
|
||||
"""
|
||||
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from agent.transports.base import ProviderTransport
|
||||
from agent.transports.types import NormalizedResponse, ToolCall, Usage
|
||||
|
||||
|
||||
class BedrockTransport(ProviderTransport):
|
||||
"""Transport for api_mode='bedrock_converse'."""
|
||||
|
||||
@property
|
||||
def api_mode(self) -> str:
|
||||
return "bedrock_converse"
|
||||
|
||||
def convert_messages(self, messages: List[Dict[str, Any]], **kwargs) -> Any:
|
||||
"""Convert OpenAI messages to Bedrock Converse format."""
|
||||
from agent.bedrock_adapter import convert_messages_to_converse
|
||||
return convert_messages_to_converse(messages)
|
||||
|
||||
def convert_tools(self, tools: List[Dict[str, Any]]) -> Any:
|
||||
"""Convert OpenAI tool schemas to Bedrock Converse toolConfig."""
|
||||
from agent.bedrock_adapter import convert_tools_to_converse
|
||||
return convert_tools_to_converse(tools)
|
||||
|
||||
def build_kwargs(
|
||||
self,
|
||||
model: str,
|
||||
messages: List[Dict[str, Any]],
|
||||
tools: Optional[List[Dict[str, Any]]] = None,
|
||||
**params,
|
||||
) -> Dict[str, Any]:
|
||||
"""Build Bedrock converse() kwargs.
|
||||
|
||||
Calls convert_messages and convert_tools internally.
|
||||
|
||||
params:
|
||||
max_tokens: int — output token limit (default 4096)
|
||||
temperature: float | None
|
||||
guardrail_config: dict | None — Bedrock guardrails
|
||||
region: str — AWS region (default 'us-east-1')
|
||||
"""
|
||||
from agent.bedrock_adapter import build_converse_kwargs
|
||||
|
||||
region = params.get("region", "us-east-1")
|
||||
guardrail = params.get("guardrail_config")
|
||||
|
||||
kwargs = build_converse_kwargs(
|
||||
model=model,
|
||||
messages=messages,
|
||||
tools=tools,
|
||||
max_tokens=params.get("max_tokens", 4096),
|
||||
temperature=params.get("temperature"),
|
||||
guardrail_config=guardrail,
|
||||
)
|
||||
# Sentinel keys for dispatch — agent pops these before the boto3 call
|
||||
kwargs["__bedrock_converse__"] = True
|
||||
kwargs["__bedrock_region__"] = region
|
||||
return kwargs
|
||||
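    # Illustrative dispatch-side sketch (hypothetical helper; the real call
    # path lives on AIAgent): the sentinel keys are popped before the kwargs
    # reach boto3, so converse() only ever sees fields it understands.
    #
    #     call_kwargs = dict(api_kwargs)
    #     call_kwargs.pop("__bedrock_converse__", None)
    #     region = call_kwargs.pop("__bedrock_region__", "us-east-1")
    #     client = boto3.client("bedrock-runtime", region_name=region)
    #     raw = client.converse(**call_kwargs)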
|
||||
def normalize_response(self, response: Any, **kwargs) -> NormalizedResponse:
|
||||
"""Normalize Bedrock response to NormalizedResponse.
|
||||
|
||||
Handles two shapes:
|
||||
1. Raw boto3 dict (from direct converse() calls)
|
||||
2. Already-normalized SimpleNamespace with .choices (from dispatch site)
|
||||
"""
|
||||
from agent.bedrock_adapter import normalize_converse_response
|
||||
|
||||
# Normalize to OpenAI-compatible SimpleNamespace
|
||||
if hasattr(response, "choices") and response.choices:
|
||||
# Already normalized at dispatch site
|
||||
ns = response
|
||||
else:
|
||||
# Raw boto3 dict
|
||||
ns = normalize_converse_response(response)
|
||||
|
||||
choice = ns.choices[0]
|
||||
msg = choice.message
|
||||
finish_reason = choice.finish_reason or "stop"
|
||||
|
||||
tool_calls = None
|
||||
if msg.tool_calls:
|
||||
tool_calls = [
|
||||
ToolCall(
|
||||
id=tc.id,
|
||||
name=tc.function.name,
|
||||
arguments=tc.function.arguments,
|
||||
)
|
||||
for tc in msg.tool_calls
|
||||
]
|
||||
|
||||
usage = None
|
||||
if hasattr(ns, "usage") and ns.usage:
|
||||
u = ns.usage
|
||||
usage = Usage(
|
||||
prompt_tokens=getattr(u, "prompt_tokens", 0) or 0,
|
||||
completion_tokens=getattr(u, "completion_tokens", 0) or 0,
|
||||
total_tokens=getattr(u, "total_tokens", 0) or 0,
|
||||
)
|
||||
|
||||
reasoning = getattr(msg, "reasoning", None) or getattr(msg, "reasoning_content", None)
|
||||
|
||||
return NormalizedResponse(
|
||||
content=msg.content,
|
||||
tool_calls=tool_calls,
|
||||
finish_reason=finish_reason,
|
||||
reasoning=reasoning,
|
||||
usage=usage,
|
||||
)
|
||||
|
||||
def validate_response(self, response: Any) -> bool:
|
||||
"""Check Bedrock response structure.
|
||||
|
||||
After normalize_converse_response, the response has OpenAI-compatible
|
||||
.choices — same check as chat_completions.
|
||||
"""
|
||||
if response is None:
|
||||
return False
|
||||
# Raw Bedrock dict response — check for 'output' key
|
||||
if isinstance(response, dict):
|
||||
return "output" in response
|
||||
# Already-normalized SimpleNamespace
|
||||
if hasattr(response, "choices"):
|
||||
return bool(response.choices)
|
||||
return False
|
||||
|
||||
def map_finish_reason(self, raw_reason: str) -> str:
|
||||
"""Map Bedrock stop reason to OpenAI finish_reason.
|
||||
|
||||
The adapter already does this mapping inside normalize_converse_response,
|
||||
so this is only used for direct access to raw responses.
|
||||
"""
|
||||
_MAP = {
|
||||
"end_turn": "stop",
|
||||
"tool_use": "tool_calls",
|
||||
"max_tokens": "length",
|
||||
"stop_sequence": "stop",
|
||||
"guardrail_intervened": "content_filter",
|
||||
"content_filtered": "content_filter",
|
||||
}
|
||||
return _MAP.get(raw_reason, "stop")
|
||||
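    # Example (illustrative), matching the table above:
    #
    #     >>> BedrockTransport().map_finish_reason("tool_use")
    #     'tool_calls'
    #     >>> BedrockTransport().map_finish_reason("guardrail_intervened")
    #     'content_filter'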
|
||||
|
||||
# Auto-register on import
|
||||
from agent.transports import register_transport # noqa: E402
|
||||
|
||||
register_transport("bedrock_converse", BedrockTransport)
|
||||
@@ -1,597 +0,0 @@
"""OpenAI Chat Completions transport.
|
||||
|
||||
Handles the default api_mode ('chat_completions') used by ~16 OpenAI-compatible
|
||||
providers (OpenRouter, Nous, NVIDIA, Qwen, Ollama, DeepSeek, xAI, Kimi, etc.).
|
||||
|
||||
Messages and tools are already in OpenAI format — convert_messages and
|
||||
convert_tools are near-identity. The complexity lives in build_kwargs
|
||||
which has provider-specific conditionals for max_tokens defaults,
|
||||
reasoning configuration, temperature handling, and extra_body assembly.
|
||||
"""
|
||||
|
||||
import copy
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from agent.lmstudio_reasoning import resolve_lmstudio_effort
|
||||
from agent.moonshot_schema import is_moonshot_model, sanitize_moonshot_tools
|
||||
from agent.prompt_builder import DEVELOPER_ROLE_MODELS
|
||||
from agent.transports.base import ProviderTransport
|
||||
from agent.transports.types import NormalizedResponse, ToolCall, Usage
|
||||
|
||||
|
||||
def _build_gemini_thinking_config(model: str, reasoning_config: dict | None) -> dict | None:
|
||||
"""Translate Hermes/OpenRouter-style reasoning config to Gemini thinkingConfig."""
|
||||
if reasoning_config is None or not isinstance(reasoning_config, dict):
|
||||
return None
|
||||
|
||||
normalized_model = (model or "").strip().lower()
|
||||
if normalized_model.startswith("google/"):
|
||||
normalized_model = normalized_model.split("/", 1)[1]
|
||||
|
||||
# ``thinking_config`` is a Gemini-only request parameter. The same
|
||||
# ``gemini`` provider also serves Gemma (and historically PaLM/Bard);
|
||||
# those reject the field with HTTP 400 "Unknown name 'thinking_config':
|
||||
# Cannot find field" — including the polite ``{"includeThoughts": False}``
|
||||
# form. Omit the field entirely on non-Gemini models. (#17426)
|
||||
if not normalized_model.startswith("gemini"):
|
||||
return None
|
||||
|
||||
if reasoning_config.get("enabled") is False:
|
||||
# Gemini can hide thought parts even when internal thinking still
|
||||
# happens; omit thinkingLevel to avoid model-specific validation quirks.
|
||||
return {"includeThoughts": False}
|
||||
|
||||
effort = str(reasoning_config.get("effort", "medium") or "medium").strip().lower()
|
||||
if effort == "none":
|
||||
return {"includeThoughts": False}
|
||||
|
||||
thinking_config: Dict[str, Any] = {"includeThoughts": True}
|
||||
|
||||
# Gemini 2.5 accepts thinkingBudget; don't guess a budget from Hermes'
|
||||
# coarse effort levels. ``includeThoughts`` alone is enough to surface
|
||||
# thought parts without risking request validation errors.
|
||||
if normalized_model.startswith("gemini-2.5-"):
|
||||
return thinking_config
|
||||
|
||||
if effort not in {"minimal", "low", "medium", "high", "xhigh"}:
|
||||
effort = "medium"
|
||||
|
||||
# Gemini 3 Flash documents low/medium/high thinking levels; Gemini 3 Pro
|
||||
# is stricter (low/high). Clamp Hermes' wider effort set to what each
|
||||
# family accepts so we never forward an undocumented level verbatim.
|
||||
if normalized_model.startswith(("gemini-3", "gemini-3.1")):
|
||||
if "flash" in normalized_model:
|
||||
if effort in {"minimal", "low"}:
|
||||
thinking_config["thinkingLevel"] = "low"
|
||||
elif effort in {"high", "xhigh"}:
|
||||
thinking_config["thinkingLevel"] = "high"
|
||||
else:
|
||||
thinking_config["thinkingLevel"] = "medium"
|
||||
elif "pro" in normalized_model:
|
||||
thinking_config["thinkingLevel"] = (
|
||||
"high" if effort in {"high", "xhigh"} else "low"
|
||||
)
|
||||
|
||||
return thinking_config
|
||||
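# Examples (illustrative model ids), following the clamping rules above:
#
#     >>> _build_gemini_thinking_config("gemini-3-pro-preview", {"effort": "medium"})
#     {'includeThoughts': True, 'thinkingLevel': 'low'}
#     >>> _build_gemini_thinking_config("gemini-2.5-pro", {"effort": "high"})
#     {'includeThoughts': True}
#     >>> _build_gemini_thinking_config("gemma-3-27b-it", {"effort": "high"}) is None
#     True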
|
||||
|
||||
def _snake_case_gemini_thinking_config(config: dict | None) -> dict | None:
|
||||
"""Convert Gemini thinking config keys to the OpenAI-compat field names."""
|
||||
if not isinstance(config, dict) or not config:
|
||||
return None
|
||||
|
||||
translated: Dict[str, Any] = {}
|
||||
if isinstance(config.get("includeThoughts"), bool):
|
||||
translated["include_thoughts"] = config["includeThoughts"]
|
||||
if isinstance(config.get("thinkingLevel"), str) and config["thinkingLevel"].strip():
|
||||
translated["thinking_level"] = config["thinkingLevel"].strip().lower()
|
||||
if isinstance(config.get("thinkingBudget"), (int, float)):
|
||||
translated["thinking_budget"] = int(config["thinkingBudget"])
|
||||
return translated or None
|
||||
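# Example (illustrative):
#
#     >>> _snake_case_gemini_thinking_config({"includeThoughts": True, "thinkingLevel": "High"})
#     {'include_thoughts': True, 'thinking_level': 'high'}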
|
||||
|
||||
def _is_gemini_openai_compat_base_url(base_url: Any) -> bool:
|
||||
normalized = str(base_url or "").strip().rstrip("/").lower()
|
||||
if not normalized:
|
||||
return False
|
||||
if "generativelanguage.googleapis.com" not in normalized:
|
||||
return False
|
||||
return normalized.endswith("/openai")
|
||||
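# Example (illustrative): only Google's OpenAI-compat endpoint matches.
#
#     >>> _is_gemini_openai_compat_base_url("https://generativelanguage.googleapis.com/v1beta/openai/")
#     True
#     >>> _is_gemini_openai_compat_base_url("https://generativelanguage.googleapis.com/v1beta")
#     False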
|
||||
|
||||
class ChatCompletionsTransport(ProviderTransport):
|
||||
"""Transport for api_mode='chat_completions'.
|
||||
|
||||
The default path for OpenAI-compatible providers.
|
||||
"""
|
||||
|
||||
@property
|
||||
def api_mode(self) -> str:
|
||||
return "chat_completions"
|
||||
|
||||
def convert_messages(
|
||||
self, messages: list[dict[str, Any]], **kwargs
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Messages are already in OpenAI format — sanitize Codex leaks only.
|
||||
|
||||
Strips Codex Responses API fields (``codex_reasoning_items`` /
|
||||
``codex_message_items`` on the message, ``call_id``/``response_item_id``
|
||||
on tool_calls) that strict chat-completions providers reject with 400/422.
|
||||
"""
|
||||
needs_sanitize = False
|
||||
for msg in messages:
|
||||
if not isinstance(msg, dict):
|
||||
continue
|
||||
if "codex_reasoning_items" in msg or "codex_message_items" in msg:
|
||||
needs_sanitize = True
|
||||
break
|
||||
tool_calls = msg.get("tool_calls")
|
||||
if isinstance(tool_calls, list):
|
||||
for tc in tool_calls:
|
||||
if isinstance(tc, dict) and (
|
||||
"call_id" in tc or "response_item_id" in tc
|
||||
):
|
||||
needs_sanitize = True
|
||||
break
|
||||
if needs_sanitize:
|
||||
break
|
||||
|
||||
if not needs_sanitize:
|
||||
return messages
|
||||
|
||||
sanitized = copy.deepcopy(messages)
|
||||
for msg in sanitized:
|
||||
if not isinstance(msg, dict):
|
||||
continue
|
||||
msg.pop("codex_reasoning_items", None)
|
||||
msg.pop("codex_message_items", None)
|
||||
tool_calls = msg.get("tool_calls")
|
||||
if isinstance(tool_calls, list):
|
||||
for tc in tool_calls:
|
||||
if isinstance(tc, dict):
|
||||
tc.pop("call_id", None)
|
||||
tc.pop("response_item_id", None)
|
||||
return sanitized
|
||||
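    # Illustrative round-trip (hypothetical message values):
    #
    #     >>> t = ChatCompletionsTransport()
    #     >>> msgs = [{"role": "assistant", "content": "ok",
    #     ...          "tool_calls": [{"id": "t1", "call_id": "call_1",
    #     ...                          "function": {"name": "f", "arguments": "{}"}}]}]
    #     >>> "call_id" in t.convert_messages(msgs)[0]["tool_calls"][0]
    #     False
    #     >>> "call_id" in msgs[0]["tool_calls"][0]   # original list is left untouched
    #     True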
|
||||
def convert_tools(self, tools: list[dict[str, Any]]) -> list[dict[str, Any]]:
|
||||
"""Tools are already in OpenAI format — identity."""
|
||||
return tools
|
||||
|
||||
def build_kwargs(
|
||||
self,
|
||||
model: str,
|
||||
messages: list[dict[str, Any]],
|
||||
tools: list[dict[str, Any]] | None = None,
|
||||
**params,
|
||||
) -> dict[str, Any]:
|
||||
"""Build chat.completions.create() kwargs.
|
||||
|
||||
params (all optional):
|
||||
timeout: float — API call timeout
|
||||
max_tokens: int | None — user-configured max tokens
|
||||
ephemeral_max_output_tokens: int | None — one-shot override
|
||||
max_tokens_param_fn: callable — returns {max_tokens: N} or {max_completion_tokens: N}
|
||||
reasoning_config: dict | None
|
||||
request_overrides: dict | None
|
||||
session_id: str | None
|
||||
model_lower: str — lowercase model name for pattern matching
|
||||
# Provider profile path (all per-provider quirks live in providers/)
|
||||
provider_profile: ProviderProfile | None — when present, delegates to
|
||||
_build_kwargs_from_profile(); all flag params below are bypassed.
|
||||
# Legacy-path flags — only used when provider_profile is None
|
||||
# (i.e. custom / unregistered providers). Known providers all go
|
||||
# through provider_profile.
|
||||
is_openrouter: bool
|
||||
is_nous: bool
|
||||
is_qwen_portal: bool
|
||||
is_github_models: bool
|
||||
is_nvidia_nim: bool
|
||||
is_kimi: bool
|
||||
is_tokenhub: bool
|
||||
is_lmstudio: bool
|
||||
is_custom_provider: bool
|
||||
ollama_num_ctx: int | None
|
||||
# Provider routing
|
||||
provider_preferences: dict | None
|
||||
# Qwen-specific
|
||||
qwen_prepare_fn: callable | None — runs AFTER codex sanitization
|
||||
qwen_prepare_inplace_fn: callable | None — in-place variant for deepcopied lists
|
||||
qwen_session_metadata: dict | None
|
||||
# Temperature
|
||||
fixed_temperature: Any — from _fixed_temperature_for_model()
|
||||
omit_temperature: bool
|
||||
# Reasoning
|
||||
supports_reasoning: bool
|
||||
github_reasoning_extra: dict | None
|
||||
lmstudio_reasoning_options: list[str] | None # raw allowed_options from /api/v1/models
|
||||
# Claude on OpenRouter/Nous max output
|
||||
anthropic_max_output: int | None
|
||||
extra_body_additions: dict | None
|
||||
"""
|
||||
# Codex sanitization: drop reasoning_items / call_id / response_item_id
|
||||
sanitized = self.convert_messages(messages)
|
||||
|
||||
# ── Provider profile: single-path when present ──────────────────
|
||||
_profile = params.get("provider_profile")
|
||||
if _profile:
|
||||
return self._build_kwargs_from_profile(
|
||||
_profile, model, sanitized, tools, params
|
||||
)
|
||||
|
||||
# ── Legacy fallback (unregistered / unknown provider) ───────────
|
||||
# Reached only when get_provider_profile() returned None.
|
||||
# Known providers always go through the profile path above.
|
||||
|
||||
# Developer role swap for GPT-5/Codex models
|
||||
model_lower = params.get("model_lower", (model or "").lower())
|
||||
if (
|
||||
sanitized
|
||||
and isinstance(sanitized[0], dict)
|
||||
and sanitized[0].get("role") == "system"
|
||||
and any(p in model_lower for p in DEVELOPER_ROLE_MODELS)
|
||||
):
|
||||
sanitized = list(sanitized)
|
||||
sanitized[0] = {**sanitized[0], "role": "developer"}
|
||||
|
||||
api_kwargs: dict[str, Any] = {
|
||||
"model": model,
|
||||
"messages": sanitized,
|
||||
}
|
||||
|
||||
timeout = params.get("timeout")
|
||||
if timeout is not None:
|
||||
api_kwargs["timeout"] = timeout
|
||||
|
||||
# Tools
|
||||
if tools:
|
||||
# Moonshot/Kimi uses a stricter flavored JSON Schema. Rewriting
|
||||
# tool parameters here keeps aggregator routes (Nous, OpenRouter,
|
||||
# etc.) compatible, in addition to direct moonshot.ai endpoints.
|
||||
if is_moonshot_model(model):
|
||||
tools = sanitize_moonshot_tools(tools)
|
||||
api_kwargs["tools"] = tools
|
||||
|
||||
# max_tokens resolution — priority: ephemeral > user > provider default
|
||||
max_tokens_fn = params.get("max_tokens_param_fn")
|
||||
ephemeral = params.get("ephemeral_max_output_tokens")
|
||||
max_tokens = params.get("max_tokens")
|
||||
anthropic_max_out = params.get("anthropic_max_output")
|
||||
is_nvidia_nim = params.get("is_nvidia_nim", False)
|
||||
is_kimi = params.get("is_kimi", False)
|
||||
is_tokenhub = params.get("is_tokenhub", False)
|
||||
reasoning_config = params.get("reasoning_config")
|
||||
|
||||
if ephemeral is not None and max_tokens_fn:
|
||||
api_kwargs.update(max_tokens_fn(ephemeral))
|
||||
elif max_tokens is not None and max_tokens_fn:
|
||||
api_kwargs.update(max_tokens_fn(max_tokens))
|
||||
elif anthropic_max_out is not None:
|
||||
api_kwargs["max_tokens"] = anthropic_max_out
|
||||
|
||||
# Kimi: top-level reasoning_effort (unless thinking disabled)
|
||||
if is_kimi:
|
||||
_kimi_thinking_off = bool(
|
||||
reasoning_config
|
||||
and isinstance(reasoning_config, dict)
|
||||
and reasoning_config.get("enabled") is False
|
||||
)
|
||||
if not _kimi_thinking_off:
|
||||
_kimi_effort = "medium"
|
||||
if reasoning_config and isinstance(reasoning_config, dict):
|
||||
_e = (reasoning_config.get("effort") or "").strip().lower()
|
||||
if _e in ("low", "medium", "high"):
|
||||
_kimi_effort = _e
|
||||
api_kwargs["reasoning_effort"] = _kimi_effort
|
||||
|
||||
# Tencent TokenHub: top-level reasoning_effort (unless thinking disabled)
|
||||
if is_tokenhub:
|
||||
_tokenhub_thinking_off = bool(
|
||||
reasoning_config
|
||||
and isinstance(reasoning_config, dict)
|
||||
and reasoning_config.get("enabled") is False
|
||||
)
|
||||
if not _tokenhub_thinking_off:
|
||||
_tokenhub_effort = "high"
|
||||
if reasoning_config and isinstance(reasoning_config, dict):
|
||||
_e = (reasoning_config.get("effort") or "").strip().lower()
|
||||
if _e in ("low", "medium", "high"):
|
||||
_tokenhub_effort = _e
|
||||
api_kwargs["reasoning_effort"] = _tokenhub_effort
|
||||
|
||||
# LM Studio: top-level reasoning_effort. Only emit when the model
|
||||
# declares reasoning support via /api/v1/models capabilities (gated
|
||||
# upstream by params["supports_reasoning"]). resolve_lmstudio_effort
|
||||
# is shared with run_agent's summary path so both stay in sync.
|
||||
if params.get("is_lmstudio", False) and params.get("supports_reasoning", False):
|
||||
_lm_effort = resolve_lmstudio_effort(
|
||||
reasoning_config,
|
||||
params.get("lmstudio_reasoning_options"),
|
||||
)
|
||||
if _lm_effort is not None:
|
||||
api_kwargs["reasoning_effort"] = _lm_effort
|
||||
|
||||
# extra_body assembly
|
||||
extra_body: dict[str, Any] = {}
|
||||
|
||||
is_openrouter = params.get("is_openrouter", False)
|
||||
is_nous = params.get("is_nous", False)
|
||||
is_github_models = params.get("is_github_models", False)
|
||||
provider_name = str(params.get("provider_name") or "").strip().lower()
|
||||
base_url = params.get("base_url")
|
||||
|
||||
provider_prefs = params.get("provider_preferences")
|
||||
if provider_prefs and is_openrouter:
|
||||
extra_body["provider"] = provider_prefs
|
||||
|
||||
# Kimi extra_body.thinking
|
||||
if is_kimi:
|
||||
_kimi_thinking_enabled = True
|
||||
if reasoning_config and isinstance(reasoning_config, dict):
|
||||
if reasoning_config.get("enabled") is False:
|
||||
_kimi_thinking_enabled = False
|
||||
extra_body["thinking"] = {
|
||||
"type": "enabled" if _kimi_thinking_enabled else "disabled",
|
||||
}
|
||||
|
||||
# Reasoning. LM Studio is handled above via top-level reasoning_effort,
|
||||
# so skip emitting extra_body.reasoning for it.
|
||||
if params.get("supports_reasoning", False) and not params.get("is_lmstudio", False):
|
||||
if is_github_models:
|
||||
gh_reasoning = params.get("github_reasoning_extra")
|
||||
if gh_reasoning is not None:
|
||||
extra_body["reasoning"] = gh_reasoning
|
||||
else:
|
||||
extra_body["reasoning"] = {"enabled": True, "effort": "medium"}
|
||||
|
||||
if provider_name == "gemini":
|
||||
raw_thinking_config = _build_gemini_thinking_config(model, reasoning_config)
|
||||
if _is_gemini_openai_compat_base_url(base_url):
|
||||
thinking_config = _snake_case_gemini_thinking_config(raw_thinking_config)
|
||||
if thinking_config:
|
||||
openai_compat_extra = extra_body.get("extra_body", {})
|
||||
google_extra = openai_compat_extra.get("google", {})
|
||||
google_extra["thinking_config"] = thinking_config
|
||||
openai_compat_extra["google"] = google_extra
|
||||
extra_body["extra_body"] = openai_compat_extra
|
||||
elif raw_thinking_config:
|
||||
extra_body["thinking_config"] = raw_thinking_config
|
||||
elif provider_name == "google-gemini-cli":
|
||||
thinking_config = _build_gemini_thinking_config(model, reasoning_config)
|
||||
if thinking_config:
|
||||
extra_body["thinking_config"] = thinking_config
|
||||
|
||||
# Merge any pre-built extra_body additions
|
||||
additions = params.get("extra_body_additions")
|
||||
if additions:
|
||||
extra_body.update(additions)
|
||||
|
||||
if extra_body:
|
||||
api_kwargs["extra_body"] = extra_body
|
||||
|
||||
# Request overrides last (service_tier etc.)
|
||||
overrides = params.get("request_overrides")
|
||||
if overrides:
|
||||
api_kwargs.update(overrides)
|
||||
|
||||
return api_kwargs
|
||||
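    # Illustrative precedence (hypothetical values): with
    #
    #     params = {"max_tokens_param_fn": lambda n: {"max_tokens": n},
    #               "max_tokens": 8192, "ephemeral_max_output_tokens": 1024}
    #
    # the one-shot ephemeral override wins and the request carries
    # max_tokens=1024; drop the ephemeral key and the user's 8192 is used.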
|
||||
def _build_kwargs_from_profile(self, profile, model, sanitized, tools, params):
|
||||
"""Build API kwargs using a ProviderProfile — single path, no legacy flags.
|
||||
|
||||
This method replaces the entire flag-based kwargs assembly when a
|
||||
provider_profile is passed. Every quirk comes from the profile object.
|
||||
"""
|
||||
from providers.base import OMIT_TEMPERATURE
|
||||
|
||||
# Message preprocessing
|
||||
sanitized = profile.prepare_messages(sanitized)
|
||||
|
||||
# Developer role swap — model-name-based, applies to all providers
|
||||
_model_lower = (model or "").lower()
|
||||
if (
|
||||
sanitized
|
||||
and isinstance(sanitized[0], dict)
|
||||
and sanitized[0].get("role") == "system"
|
||||
and any(p in _model_lower for p in DEVELOPER_ROLE_MODELS)
|
||||
):
|
||||
sanitized = list(sanitized)
|
||||
sanitized[0] = {**sanitized[0], "role": "developer"}
|
||||
|
||||
api_kwargs: dict[str, Any] = {
|
||||
"model": model,
|
||||
"messages": sanitized,
|
||||
}
|
||||
|
||||
# Temperature
|
||||
if profile.fixed_temperature is OMIT_TEMPERATURE:
|
||||
pass # Don't include temperature at all
|
||||
elif profile.fixed_temperature is not None:
|
||||
api_kwargs["temperature"] = profile.fixed_temperature
|
||||
else:
|
||||
# Use caller's temperature if provided
|
||||
temp = params.get("temperature")
|
||||
if temp is not None:
|
||||
api_kwargs["temperature"] = temp
|
||||
|
||||
# Timeout
|
||||
timeout = params.get("timeout")
|
||||
if timeout is not None:
|
||||
api_kwargs["timeout"] = timeout
|
||||
|
||||
# Tools — apply Moonshot/Kimi schema sanitization regardless of path
|
||||
if tools:
|
||||
if is_moonshot_model(model):
|
||||
tools = sanitize_moonshot_tools(tools)
|
||||
api_kwargs["tools"] = tools
|
||||
|
||||
# max_tokens resolution — priority: ephemeral > user > profile default
|
||||
max_tokens_fn = params.get("max_tokens_param_fn")
|
||||
ephemeral = params.get("ephemeral_max_output_tokens")
|
||||
user_max = params.get("max_tokens")
|
||||
anthropic_max = params.get("anthropic_max_output")
|
||||
|
||||
if ephemeral is not None and max_tokens_fn:
|
||||
api_kwargs.update(max_tokens_fn(ephemeral))
|
||||
elif user_max is not None and max_tokens_fn:
|
||||
api_kwargs.update(max_tokens_fn(user_max))
|
||||
elif profile.default_max_tokens and max_tokens_fn:
|
||||
api_kwargs.update(max_tokens_fn(profile.default_max_tokens))
|
||||
elif anthropic_max is not None:
|
||||
api_kwargs["max_tokens"] = anthropic_max
|
||||
|
||||
# Provider-specific api_kwargs extras (reasoning_effort, metadata, etc.)
|
||||
reasoning_config = params.get("reasoning_config")
|
||||
extra_body_from_profile, top_level_from_profile = (
|
||||
profile.build_api_kwargs_extras(
|
||||
reasoning_config=reasoning_config,
|
||||
supports_reasoning=params.get("supports_reasoning", False),
|
||||
qwen_session_metadata=params.get("qwen_session_metadata"),
|
||||
model=model,
|
||||
ollama_num_ctx=params.get("ollama_num_ctx"),
|
||||
)
|
||||
)
|
||||
api_kwargs.update(top_level_from_profile)
|
||||
|
||||
# extra_body assembly
|
||||
extra_body: dict[str, Any] = {}
|
||||
|
||||
# Profile's extra_body (tags, provider prefs, vl_high_resolution, etc.)
|
||||
profile_body = profile.build_extra_body(
|
||||
session_id=params.get("session_id"),
|
||||
provider_preferences=params.get("provider_preferences"),
|
||||
model=model,
|
||||
base_url=params.get("base_url"),
|
||||
reasoning_config=reasoning_config,
|
||||
)
|
||||
if profile_body:
|
||||
extra_body.update(profile_body)
|
||||
|
||||
# Profile's reasoning/thinking extra_body entries
|
||||
if extra_body_from_profile:
|
||||
extra_body.update(extra_body_from_profile)
|
||||
|
||||
# Merge any pre-built extra_body additions from the caller
|
||||
additions = params.get("extra_body_additions")
|
||||
if additions:
|
||||
extra_body.update(additions)
|
||||
|
||||
# Request overrides (user config)
|
||||
overrides = params.get("request_overrides")
|
||||
if overrides:
|
||||
for k, v in overrides.items():
|
||||
if k == "extra_body" and isinstance(v, dict):
|
||||
extra_body.update(v)
|
||||
else:
|
||||
api_kwargs[k] = v
|
||||
|
||||
if extra_body:
|
||||
api_kwargs["extra_body"] = extra_body
|
||||
|
||||
return api_kwargs
|
||||
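    # Minimal illustrative profile (hypothetical provider) showing the hooks
    # this method consumes; everything else keeps the base defaults:
    #
    #     class ExampleProfile(ProviderProfile):
    #         fixed_temperature = None     # defer to the caller's temperature
    #         default_max_tokens = 8192    # used when no override is passed
    #         def prepare_messages(self, messages): return messages
    #         def build_extra_body(self, **kw): return {}
    #         def build_api_kwargs_extras(self, **kw): return {}, {}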
|
||||
def normalize_response(self, response: Any, **kwargs) -> NormalizedResponse:
|
||||
"""Normalize OpenAI ChatCompletion to NormalizedResponse.
|
||||
|
||||
For chat_completions, this is near-identity — the response is already
|
||||
in OpenAI format. extra_content on tool_calls (Gemini thought_signature)
|
||||
is preserved via ToolCall.provider_data. reasoning_details (OpenRouter
|
||||
unified format) and reasoning_content (DeepSeek/Moonshot) are also
|
||||
preserved for downstream replay.
|
||||
"""
|
||||
choice = response.choices[0]
|
||||
msg = choice.message
|
||||
finish_reason = choice.finish_reason or "stop"
|
||||
|
||||
tool_calls = None
|
||||
if msg.tool_calls:
|
||||
tool_calls = []
|
||||
for tc in msg.tool_calls:
|
||||
# Preserve provider-specific extras on the tool call.
|
||||
# Gemini 3 thinking models attach extra_content with
|
||||
# thought_signature — without replay on the next turn the API
|
||||
# rejects the request with 400.
|
||||
tc_provider_data: dict[str, Any] = {}
|
||||
extra = getattr(tc, "extra_content", None)
|
||||
if extra is None and hasattr(tc, "model_extra"):
|
||||
extra = (tc.model_extra or {}).get("extra_content")
|
||||
if extra is not None:
|
||||
if hasattr(extra, "model_dump"):
|
||||
try:
|
||||
extra = extra.model_dump()
|
||||
except Exception:
|
||||
pass
|
||||
tc_provider_data["extra_content"] = extra
|
||||
tool_calls.append(
|
||||
ToolCall(
|
||||
id=tc.id,
|
||||
name=tc.function.name,
|
||||
arguments=tc.function.arguments,
|
||||
provider_data=tc_provider_data or None,
|
||||
)
|
||||
)
|
||||
|
||||
usage = None
|
||||
if hasattr(response, "usage") and response.usage:
|
||||
u = response.usage
|
||||
usage = Usage(
|
||||
prompt_tokens=getattr(u, "prompt_tokens", 0) or 0,
|
||||
completion_tokens=getattr(u, "completion_tokens", 0) or 0,
|
||||
total_tokens=getattr(u, "total_tokens", 0) or 0,
|
||||
)
|
||||
|
||||
# Preserve reasoning fields separately. DeepSeek/Moonshot use
|
||||
# ``reasoning_content``; others use ``reasoning``. Downstream code
|
||||
# (_extract_reasoning, thinking-prefill retry) reads both distinctly,
|
||||
# so keep them apart in provider_data rather than merging.
|
||||
reasoning = getattr(msg, "reasoning", None)
|
||||
reasoning_content = getattr(msg, "reasoning_content", None)
|
||||
if reasoning_content is None and hasattr(msg, "model_extra"):
|
||||
model_extra = getattr(msg, "model_extra", None) or {}
|
||||
if isinstance(model_extra, dict) and "reasoning_content" in model_extra:
|
||||
reasoning_content = model_extra["reasoning_content"]
|
||||
|
||||
provider_data: Dict[str, Any] = {}
|
||||
if reasoning_content is not None:
|
||||
provider_data["reasoning_content"] = reasoning_content
|
||||
rd = getattr(msg, "reasoning_details", None)
|
||||
if rd:
|
||||
provider_data["reasoning_details"] = rd
|
||||
|
||||
return NormalizedResponse(
|
||||
content=msg.content,
|
||||
tool_calls=tool_calls,
|
||||
finish_reason=finish_reason,
|
||||
reasoning=reasoning,
|
||||
usage=usage,
|
||||
provider_data=provider_data or None,
|
||||
)
|
||||
|
||||
def validate_response(self, response: Any) -> bool:
|
||||
"""Check that response has valid choices."""
|
||||
if response is None:
|
||||
return False
|
||||
if not hasattr(response, "choices") or response.choices is None:
|
||||
return False
|
||||
if not response.choices:
|
||||
return False
|
||||
return True
|
||||
|
||||
def extract_cache_stats(self, response: Any) -> dict[str, int] | None:
|
||||
"""Extract OpenRouter/OpenAI cache stats from prompt_tokens_details."""
|
||||
usage = getattr(response, "usage", None)
|
||||
if usage is None:
|
||||
return None
|
||||
details = getattr(usage, "prompt_tokens_details", None)
|
||||
if details is None:
|
||||
return None
|
||||
cached = getattr(details, "cached_tokens", 0) or 0
|
||||
written = getattr(details, "cache_write_tokens", 0) or 0
|
||||
if cached or written:
|
||||
return {"cached_tokens": cached, "creation_tokens": written}
|
||||
return None
|
||||
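    # Illustrative check with a stubbed response (not a real SDK object):
    #
    #     >>> from types import SimpleNamespace as NS
    #     >>> r = NS(usage=NS(prompt_tokens_details=NS(cached_tokens=1200,
    #     ...                                          cache_write_tokens=0)))
    #     >>> ChatCompletionsTransport().extract_cache_stats(r)
    #     {'cached_tokens': 1200, 'creation_tokens': 0}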
|
||||
|
||||
# Auto-register on import
|
||||
from agent.transports import register_transport # noqa: E402
|
||||
|
||||
register_transport("chat_completions", ChatCompletionsTransport)
@@ -1,246 +0,0 @@
"""OpenAI Responses API (Codex) transport.
|
||||
|
||||
Delegates to the existing adapter functions in agent/codex_responses_adapter.py.
|
||||
This transport owns format conversion and normalization — NOT client lifecycle,
|
||||
streaming, or the _run_codex_stream() call path.
|
||||
"""
|
||||
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from agent.transports.base import ProviderTransport
|
||||
from agent.transports.types import NormalizedResponse, ToolCall
|
||||
|
||||
|
||||
class ResponsesApiTransport(ProviderTransport):
|
||||
"""Transport for api_mode='codex_responses'.
|
||||
|
||||
Wraps the functions extracted into codex_responses_adapter.py (PR 1).
|
||||
"""
|
||||
|
||||
@property
|
||||
def api_mode(self) -> str:
|
||||
return "codex_responses"
|
||||
|
||||
def convert_messages(self, messages: List[Dict[str, Any]], **kwargs) -> Any:
|
||||
"""Convert OpenAI chat messages to Responses API input items."""
|
||||
from agent.codex_responses_adapter import _chat_messages_to_responses_input
|
||||
return _chat_messages_to_responses_input(messages)
|
||||
|
||||
def convert_tools(self, tools: List[Dict[str, Any]]) -> Any:
|
||||
"""Convert OpenAI tool schemas to Responses API function definitions."""
|
||||
from agent.codex_responses_adapter import _responses_tools
|
||||
return _responses_tools(tools)
|
||||
|
||||
def build_kwargs(
|
||||
self,
|
||||
model: str,
|
||||
messages: List[Dict[str, Any]],
|
||||
tools: Optional[List[Dict[str, Any]]] = None,
|
||||
**params,
|
||||
) -> Dict[str, Any]:
|
||||
"""Build Responses API kwargs.
|
||||
|
||||
Calls convert_messages and convert_tools internally.
|
||||
|
||||
params:
|
||||
instructions: str — system prompt (extracted from messages[0] if not given)
|
||||
reasoning_config: dict | None — {effort, enabled}
|
||||
session_id: str | None — used for prompt_cache_key + xAI conv header
|
||||
max_tokens: int | None — max_output_tokens
|
||||
request_overrides: dict | None — extra kwargs merged in
|
||||
provider: str | None — provider name for backend-specific logic
|
||||
base_url: str | None — endpoint URL
|
||||
base_url_hostname: str | None — hostname for backend detection
|
||||
is_github_responses: bool — Copilot/GitHub models backend
|
||||
is_codex_backend: bool — chatgpt.com/backend-api/codex
|
||||
is_xai_responses: bool — xAI/Grok backend
|
||||
github_reasoning_extra: dict | None — Copilot reasoning params
|
||||
"""
|
||||
from agent.codex_responses_adapter import (
|
||||
_chat_messages_to_responses_input,
|
||||
_responses_tools,
|
||||
)
|
||||
|
||||
from run_agent import DEFAULT_AGENT_IDENTITY
|
||||
|
||||
instructions = params.get("instructions", "")
|
||||
payload_messages = messages
|
||||
if not instructions:
|
||||
if messages and messages[0].get("role") == "system":
|
||||
instructions = str(messages[0].get("content") or "").strip()
|
||||
payload_messages = messages[1:]
|
||||
if not instructions:
|
||||
instructions = DEFAULT_AGENT_IDENTITY
|
||||
|
||||
is_github_responses = params.get("is_github_responses", False)
|
||||
is_codex_backend = params.get("is_codex_backend", False)
|
||||
is_xai_responses = params.get("is_xai_responses", False)
|
||||
|
||||
# Resolve reasoning effort
|
||||
reasoning_effort = "medium"
|
||||
reasoning_enabled = True
|
||||
reasoning_config = params.get("reasoning_config")
|
||||
if reasoning_config and isinstance(reasoning_config, dict):
|
||||
if reasoning_config.get("enabled") is False:
|
||||
reasoning_enabled = False
|
||||
elif reasoning_config.get("effort"):
|
||||
reasoning_effort = reasoning_config["effort"]
|
||||
|
||||
_effort_clamp = {"minimal": "low"}
|
||||
reasoning_effort = _effort_clamp.get(reasoning_effort, reasoning_effort)
|
||||
|
||||
kwargs = {
|
||||
"model": model,
|
||||
"instructions": instructions,
|
||||
"input": _chat_messages_to_responses_input(payload_messages),
|
||||
"tools": _responses_tools(tools),
|
||||
"tool_choice": "auto",
|
||||
"parallel_tool_calls": True,
|
||||
"store": False,
|
||||
}
|
||||
|
||||
session_id = params.get("session_id")
|
||||
if not is_github_responses and session_id:
|
||||
kwargs["prompt_cache_key"] = session_id
|
||||
|
||||
if reasoning_enabled and is_xai_responses:
|
||||
kwargs["include"] = ["reasoning.encrypted_content"]
|
||||
elif reasoning_enabled:
|
||||
if is_github_responses:
|
||||
github_reasoning = params.get("github_reasoning_extra")
|
||||
if github_reasoning is not None:
|
||||
kwargs["reasoning"] = github_reasoning
|
||||
else:
|
||||
kwargs["reasoning"] = {"effort": reasoning_effort, "summary": "auto"}
|
||||
kwargs["include"] = ["reasoning.encrypted_content"]
|
||||
elif not is_github_responses and not is_xai_responses:
|
||||
kwargs["include"] = []
|
||||
|
||||
request_overrides = params.get("request_overrides")
|
||||
if request_overrides:
|
||||
kwargs.update(request_overrides)
|
||||
|
||||
if is_codex_backend:
|
||||
prompt_cache_key = kwargs.get("prompt_cache_key")
|
||||
cache_scope_id = str(prompt_cache_key or session_id or "").strip()
|
||||
if cache_scope_id:
|
||||
existing_extra_headers = kwargs.get("extra_headers")
|
||||
merged_extra_headers: Dict[str, str] = {}
|
||||
if isinstance(existing_extra_headers, dict):
|
||||
merged_extra_headers.update(
|
||||
{
|
||||
str(key): str(value)
|
||||
for key, value in existing_extra_headers.items()
|
||||
if key and value is not None
|
||||
}
|
||||
)
|
||||
merged_extra_headers["session_id"] = cache_scope_id
|
||||
merged_extra_headers["x-client-request-id"] = cache_scope_id
|
||||
kwargs["extra_headers"] = merged_extra_headers
|
||||
|
||||
max_tokens = params.get("max_tokens")
|
||||
if max_tokens is not None and not is_codex_backend:
|
||||
kwargs["max_output_tokens"] = max_tokens
|
||||
|
||||
if is_xai_responses and session_id:
|
||||
existing_extra_headers = kwargs.get("extra_headers")
|
||||
merged_extra_headers: Dict[str, str] = {}
|
||||
if isinstance(existing_extra_headers, dict):
|
||||
merged_extra_headers.update(
|
||||
{
|
||||
str(key): str(value)
|
||||
for key, value in existing_extra_headers.items()
|
||||
if key and value is not None
|
||||
}
|
||||
)
|
||||
merged_extra_headers["x-grok-conv-id"] = session_id
|
||||
kwargs["extra_headers"] = merged_extra_headers
|
||||
|
||||
return kwargs
|
||||
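    # Illustrative shape (hypothetical model and session values): for a
    # system-first history the resulting kwargs look roughly like
    #
    #     {"model": "gpt-5.1-codex", "instructions": "<system prompt>",
    #      "input": [...], "tools": [...], "tool_choice": "auto",
    #      "parallel_tool_calls": True, "store": False,
    #      "prompt_cache_key": "<session id>",
    #      "reasoning": {"effort": "medium", "summary": "auto"},
    #      "include": ["reasoning.encrypted_content"]}
    #
    # with a "minimal" effort request clamped to "low" before it reaches the
    # reasoning block.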
|
||||
def normalize_response(self, response: Any, **kwargs) -> NormalizedResponse:
|
||||
"""Normalize Codex Responses API response to NormalizedResponse."""
|
||||
from agent.codex_responses_adapter import (
|
||||
_normalize_codex_response,
|
||||
)
|
||||
|
||||
# _normalize_codex_response returns (SimpleNamespace, finish_reason_str)
|
||||
msg, finish_reason = _normalize_codex_response(response)
|
||||
|
||||
tool_calls = None
|
||||
if msg and msg.tool_calls:
|
||||
tool_calls = []
|
||||
for tc in msg.tool_calls:
|
||||
provider_data = {}
|
||||
if hasattr(tc, "call_id") and tc.call_id:
|
||||
provider_data["call_id"] = tc.call_id
|
||||
if hasattr(tc, "response_item_id") and tc.response_item_id:
|
||||
provider_data["response_item_id"] = tc.response_item_id
|
||||
tool_calls.append(ToolCall(
|
||||
id=tc.id if hasattr(tc, "id") else (tc.function.name if hasattr(tc, "function") else None),
|
||||
name=tc.function.name if hasattr(tc, "function") else getattr(tc, "name", ""),
|
||||
arguments=tc.function.arguments if hasattr(tc, "function") else getattr(tc, "arguments", "{}"),
|
||||
provider_data=provider_data or None,
|
||||
))
|
||||
|
||||
# Extract reasoning items for provider_data
|
||||
provider_data = {}
|
||||
if msg and hasattr(msg, "codex_reasoning_items") and msg.codex_reasoning_items:
|
||||
provider_data["codex_reasoning_items"] = msg.codex_reasoning_items
|
||||
if msg and hasattr(msg, "codex_message_items") and msg.codex_message_items:
|
||||
provider_data["codex_message_items"] = msg.codex_message_items
|
||||
if msg and hasattr(msg, "reasoning_details") and msg.reasoning_details:
|
||||
provider_data["reasoning_details"] = msg.reasoning_details
|
||||
|
||||
return NormalizedResponse(
|
||||
content=msg.content if msg else None,
|
||||
tool_calls=tool_calls,
|
||||
finish_reason=finish_reason or "stop",
|
||||
reasoning=msg.reasoning if msg and hasattr(msg, "reasoning") else None,
|
||||
usage=None, # Codex usage is extracted separately in normalize_usage()
|
||||
provider_data=provider_data or None,
|
||||
)
|
||||
|
||||
def validate_response(self, response: Any) -> bool:
|
||||
"""Check Codex Responses API response has valid output structure.
|
||||
|
||||
Returns True only if response.output is a non-empty list.
|
||||
Does NOT check output_text fallback — the caller handles that
|
||||
with diagnostic logging for stream backfill recovery.
|
||||
"""
|
||||
if response is None:
|
||||
return False
|
||||
output = getattr(response, "output", None)
|
||||
if not isinstance(output, list) or not output:
|
||||
return False
|
||||
return True
|
||||
|
||||
def preflight_kwargs(self, api_kwargs: Any, *, allow_stream: bool = False) -> dict:
|
||||
"""Validate and sanitize Codex API kwargs before the call.
|
||||
|
||||
Normalizes input items, strips unsupported fields, validates structure.
|
||||
"""
|
||||
from agent.codex_responses_adapter import _preflight_codex_api_kwargs
|
||||
return _preflight_codex_api_kwargs(api_kwargs, allow_stream=allow_stream)
|
||||
|
||||
def map_finish_reason(self, raw_reason: str) -> str:
|
||||
"""Map Codex response.status to OpenAI finish_reason.
|
||||
|
||||
Codex uses response.status ('completed', 'incomplete') +
|
||||
response.incomplete_details.reason for granular mapping.
|
||||
This method handles the simple status string; the caller
|
||||
should check incomplete_details separately for 'max_output_tokens'.
|
||||
"""
|
||||
_MAP = {
|
||||
"completed": "stop",
|
||||
"incomplete": "length",
|
||||
"failed": "stop",
|
||||
"cancelled": "stop",
|
||||
}
|
||||
return _MAP.get(raw_reason, "stop")
|
||||
|
||||
|
||||
# Auto-register on import
|
||||
from agent.transports import register_transport # noqa: E402
|
||||
|
||||
register_transport("codex_responses", ResponsesApiTransport)
@@ -1,162 +0,0 @@
"""Shared types for normalized provider responses.
|
||||
|
||||
These dataclasses define the canonical shape that all provider adapters
|
||||
normalize responses to. The shared surface is intentionally minimal —
|
||||
only fields that every downstream consumer reads are top-level.
|
||||
Protocol-specific state goes in ``provider_data`` dicts (response-level
|
||||
and per-tool-call) so that protocol-aware code paths can access it
|
||||
without polluting the shared type.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any
|
||||
|
||||
|
||||
@dataclass
|
||||
class ToolCall:
|
||||
"""A normalized tool call from any provider.
|
||||
|
||||
``id`` is the protocol's canonical identifier — what gets used in
|
||||
``tool_call_id`` / ``tool_use_id`` when constructing tool result
|
||||
messages. May be ``None`` when the provider omits it; the agent
|
||||
fills it via ``_deterministic_call_id()`` before storing in history.
|
||||
|
||||
``provider_data`` carries per-tool-call protocol metadata that only
|
||||
protocol-aware code reads:
|
||||
|
||||
* Codex: ``{"call_id": "call_XXX", "response_item_id": "fc_XXX"}``
|
||||
* Gemini: ``{"extra_content": {"google": {"thought_signature": "..."}}}``
|
||||
* Others: ``None``
|
||||
"""
|
||||
|
||||
id: str | None
|
||||
name: str
|
||||
arguments: str # JSON string
|
||||
provider_data: dict[str, Any] | None = field(default=None, repr=False)
|
||||
|
||||
# ── Backward compatibility ──────────────────────────────────
|
||||
# The agent loop reads tc.function.name / tc.function.arguments
|
||||
# throughout run_agent.py (45+ sites). These properties let
|
||||
# NormalizedResponse pass through without the _nr_to_assistant_message
|
||||
# shim, while keeping ToolCall's canonical fields flat.
|
||||
@property
|
||||
def type(self) -> str:
|
||||
return "function"
|
||||
|
||||
@property
|
||||
def function(self) -> ToolCall:
|
||||
"""Return self so tc.function.name / tc.function.arguments work."""
|
||||
return self
|
||||
|
||||
@property
|
||||
def call_id(self) -> str | None:
|
||||
"""Codex call_id from provider_data, accessed via getattr by _build_assistant_message."""
|
||||
return (self.provider_data or {}).get("call_id")
|
||||
|
||||
@property
|
||||
def response_item_id(self) -> str | None:
|
||||
"""Codex response_item_id from provider_data."""
|
||||
return (self.provider_data or {}).get("response_item_id")
|
||||
|
||||
@property
    def extra_content(self) -> dict[str, Any] | None:
|
||||
"""Gemini extra_content (thought_signature) from provider_data.
|
||||
|
||||
Gemini 3 thinking models attach ``extra_content`` with a
|
||||
``thought_signature`` to each tool call. This signature must be
|
||||
replayed on subsequent API calls — without it the API rejects the
|
||||
request with HTTP 400. The chat_completions transport stores this
|
||||
in ``provider_data["extra_content"]``; this property exposes it so
|
||||
``_build_assistant_message`` can ``getattr(tc, "extra_content")``
|
||||
uniformly.
|
||||
"""
|
||||
return (self.provider_data or {}).get("extra_content")
|
||||
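    # Example (illustrative): the compat properties let the agent loop keep
    # reading the OpenAI shape without a wrapper object.
    #
    #     >>> tc = ToolCall(id="t1", name="read_file", arguments='{"path": "x"}')
    #     >>> (tc.function.name, tc.type, tc.call_id)
    #     ('read_file', 'function', None)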
|
||||
|
||||
@dataclass
|
||||
class Usage:
|
||||
"""Token usage from an API response."""
|
||||
|
||||
prompt_tokens: int = 0
|
||||
completion_tokens: int = 0
|
||||
total_tokens: int = 0
|
||||
cached_tokens: int = 0
|
||||
|
||||
|
||||
@dataclass
|
||||
class NormalizedResponse:
|
||||
"""Normalized API response from any provider.
|
||||
|
||||
Shared fields are truly cross-provider — every caller can rely on
|
||||
them without branching on api_mode. Protocol-specific state goes in
|
||||
``provider_data`` so that only protocol-aware code paths read it.
|
||||
|
||||
Response-level ``provider_data`` examples:
|
||||
|
||||
* Anthropic: ``{"reasoning_details": [...]}``
|
||||
* Codex: ``{"codex_reasoning_items": [...], "codex_message_items": [...]}``
|
||||
* Others: ``None``
|
||||
"""
|
||||
|
||||
content: str | None
|
||||
tool_calls: list[ToolCall] | None
|
||||
finish_reason: str # "stop", "tool_calls", "length", "content_filter"
|
||||
reasoning: str | None = None
|
||||
usage: Usage | None = None
|
||||
provider_data: dict[str, Any] | None = field(default=None, repr=False)
|
||||
|
||||
# ── Backward compatibility ──────────────────────────────────
|
||||
# The shim _nr_to_assistant_message() mapped these from provider_data.
|
||||
# These properties let NormalizedResponse pass through directly.
|
||||
@property
|
||||
def reasoning_content(self) -> str | None:
|
||||
pd = self.provider_data or {}
|
||||
return pd.get("reasoning_content")
|
||||
|
||||
@property
|
||||
def reasoning_details(self):
|
||||
pd = self.provider_data or {}
|
||||
return pd.get("reasoning_details")
|
||||
|
||||
@property
|
||||
def codex_reasoning_items(self):
|
||||
pd = self.provider_data or {}
|
||||
return pd.get("codex_reasoning_items")
|
||||
|
||||
@property
|
||||
def codex_message_items(self):
|
||||
pd = self.provider_data or {}
|
||||
return pd.get("codex_message_items")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Factory helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def build_tool_call(
|
||||
id: str | None,
|
||||
name: str,
|
||||
arguments: Any,
|
||||
**provider_fields: Any,
|
||||
) -> ToolCall:
|
||||
"""Build a ``ToolCall``, auto-serialising *arguments* if it's a dict.
|
||||
|
||||
Any extra keyword arguments are collected into ``provider_data``.
|
||||
"""
|
||||
args_str = json.dumps(arguments) if isinstance(arguments, dict) else str(arguments)
|
||||
pd = dict(provider_fields) if provider_fields else None
|
||||
return ToolCall(id=id, name=name, arguments=args_str, provider_data=pd)
|
||||
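# Example (illustrative): dict arguments are serialised and extra keyword
# arguments land in provider_data (hidden from the repr).
#
#     >>> build_tool_call("t1", "search", {"query": "hermes"}, call_id="call_9")
#     ToolCall(id='t1', name='search', arguments='{"query": "hermes"}')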
|
||||
|
||||
def map_finish_reason(reason: str | None, mapping: dict[str, str]) -> str:
|
||||
"""Translate a provider-specific stop reason to the normalised set.
|
||||
|
||||
Falls back to ``"stop"`` for unknown or ``None`` reasons.
|
||||
"""
|
||||
if reason is None:
|
||||
return "stop"
|
||||
return mapping.get(reason, "stop")
|
||||
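# Example (illustrative):
#
#     >>> map_finish_reason("end_turn", {"end_turn": "stop", "tool_use": "tool_calls"})
#     'stop'
#     >>> map_finish_reason(None, {"end_turn": "stop"})
#     'stop'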
@@ -1,13 +1,11 @@
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime, timezone
|
||||
from decimal import Decimal
|
||||
from typing import Any, Dict, Literal, Optional
|
||||
|
||||
from agent.model_metadata import fetch_endpoint_model_metadata, fetch_model_metadata
|
||||
from utils import base_url_host_matches
|
||||
|
||||
DEFAULT_PRICING = {"input": 0.0, "output": 0.0}
|
||||
|
||||
@@ -83,121 +81,6 @@ _UTC_NOW = lambda: datetime.now(timezone.utc)
|
||||
# Official docs snapshot entries. Models whose published pricing and cache
|
||||
# semantics are stable enough to encode exactly.
|
||||
_OFFICIAL_DOCS_PRICING: Dict[tuple[str, str], PricingEntry] = {
|
||||
# ── Anthropic Claude 4.7 ─────────────────────────────────────────────
|
||||
# Opus 4.5/4.6/4.7 share $5/$25 pricing (new tokenizer, up to 35% more
|
||||
# tokens for the same text).
|
||||
# Source: https://platform.claude.com/docs/en/about-claude/pricing
|
||||
(
|
||||
"anthropic",
|
||||
"claude-opus-4-7",
|
||||
): PricingEntry(
|
||||
input_cost_per_million=Decimal("5.00"),
|
||||
output_cost_per_million=Decimal("25.00"),
|
||||
cache_read_cost_per_million=Decimal("0.50"),
|
||||
cache_write_cost_per_million=Decimal("6.25"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://platform.claude.com/docs/en/about-claude/pricing",
|
||||
pricing_version="anthropic-pricing-2026-05",
|
||||
),
|
||||
(
|
||||
"anthropic",
|
||||
"claude-opus-4-7-20250507",
|
||||
): PricingEntry(
|
||||
input_cost_per_million=Decimal("5.00"),
|
||||
output_cost_per_million=Decimal("25.00"),
|
||||
cache_read_cost_per_million=Decimal("0.50"),
|
||||
cache_write_cost_per_million=Decimal("6.25"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://platform.claude.com/docs/en/about-claude/pricing",
|
||||
pricing_version="anthropic-pricing-2026-05",
|
||||
),
|
||||
# ── Anthropic Claude 4.6 ─────────────────────────────────────────────
|
||||
(
|
||||
"anthropic",
|
||||
"claude-opus-4-6",
|
||||
): PricingEntry(
|
||||
input_cost_per_million=Decimal("5.00"),
|
||||
output_cost_per_million=Decimal("25.00"),
|
||||
cache_read_cost_per_million=Decimal("0.50"),
|
||||
cache_write_cost_per_million=Decimal("6.25"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://platform.claude.com/docs/en/about-claude/pricing",
|
||||
pricing_version="anthropic-pricing-2026-05",
|
||||
),
|
||||
(
|
||||
"anthropic",
|
||||
"claude-opus-4-6-20250414",
|
||||
): PricingEntry(
|
||||
input_cost_per_million=Decimal("5.00"),
|
||||
output_cost_per_million=Decimal("25.00"),
|
||||
cache_read_cost_per_million=Decimal("0.50"),
|
||||
cache_write_cost_per_million=Decimal("6.25"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://platform.claude.com/docs/en/about-claude/pricing",
|
||||
pricing_version="anthropic-pricing-2026-05",
|
||||
),
|
||||
(
|
||||
"anthropic",
|
||||
"claude-sonnet-4-6",
|
||||
): PricingEntry(
|
||||
input_cost_per_million=Decimal("3.00"),
|
||||
output_cost_per_million=Decimal("15.00"),
|
||||
cache_read_cost_per_million=Decimal("0.30"),
|
||||
cache_write_cost_per_million=Decimal("3.75"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://platform.claude.com/docs/en/about-claude/pricing",
|
||||
pricing_version="anthropic-pricing-2026-05",
|
||||
),
|
||||
(
|
||||
"anthropic",
|
||||
"claude-sonnet-4-6-20250414",
|
||||
): PricingEntry(
|
||||
input_cost_per_million=Decimal("3.00"),
|
||||
output_cost_per_million=Decimal("15.00"),
|
||||
cache_read_cost_per_million=Decimal("0.30"),
|
||||
cache_write_cost_per_million=Decimal("3.75"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://platform.claude.com/docs/en/about-claude/pricing",
|
||||
pricing_version="anthropic-pricing-2026-05",
|
||||
),
|
||||
# ── Anthropic Claude 4.5 ─────────────────────────────────────────────
|
||||
(
|
||||
"anthropic",
|
||||
"claude-opus-4-5",
|
||||
): PricingEntry(
|
||||
input_cost_per_million=Decimal("5.00"),
|
||||
output_cost_per_million=Decimal("25.00"),
|
||||
cache_read_cost_per_million=Decimal("0.50"),
|
||||
cache_write_cost_per_million=Decimal("6.25"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://platform.claude.com/docs/en/about-claude/pricing",
|
||||
pricing_version="anthropic-pricing-2026-05",
|
||||
),
|
||||
(
|
||||
"anthropic",
|
||||
"claude-sonnet-4-5",
|
||||
): PricingEntry(
|
||||
input_cost_per_million=Decimal("3.00"),
|
||||
output_cost_per_million=Decimal("15.00"),
|
||||
cache_read_cost_per_million=Decimal("0.30"),
|
||||
cache_write_cost_per_million=Decimal("3.75"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://platform.claude.com/docs/en/about-claude/pricing",
|
||||
pricing_version="anthropic-pricing-2026-05",
|
||||
),
|
||||
(
|
||||
"anthropic",
|
||||
"claude-haiku-4-5",
|
||||
): PricingEntry(
|
||||
input_cost_per_million=Decimal("1.00"),
|
||||
output_cost_per_million=Decimal("5.00"),
|
||||
cache_read_cost_per_million=Decimal("0.10"),
|
||||
cache_write_cost_per_million=Decimal("1.25"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://platform.claude.com/docs/en/about-claude/pricing",
|
||||
pricing_version="anthropic-pricing-2026-05",
|
||||
),
|
||||
# ── Anthropic Claude 4 / 4.1 ─────────────────────────────────────────
|
||||
(
|
||||
"anthropic",
|
||||
"claude-opus-4-20250514",
|
||||
@@ -207,8 +90,8 @@ _OFFICIAL_DOCS_PRICING: Dict[tuple[str, str], PricingEntry] = {
|
||||
cache_read_cost_per_million=Decimal("1.50"),
|
||||
cache_write_cost_per_million=Decimal("18.75"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://platform.claude.com/docs/en/about-claude/pricing",
|
||||
pricing_version="anthropic-pricing-2026-05",
|
||||
source_url="https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching",
|
||||
pricing_version="anthropic-prompt-caching-2026-03-16",
|
||||
),
|
||||
(
|
||||
"anthropic",
|
||||
@@ -219,8 +102,8 @@ _OFFICIAL_DOCS_PRICING: Dict[tuple[str, str], PricingEntry] = {
|
||||
cache_read_cost_per_million=Decimal("0.30"),
|
||||
cache_write_cost_per_million=Decimal("3.75"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://platform.claude.com/docs/en/about-claude/pricing",
|
||||
pricing_version="anthropic-pricing-2026-05",
|
||||
source_url="https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching",
|
||||
pricing_version="anthropic-prompt-caching-2026-03-16",
|
||||
),
|
||||
# OpenAI
|
||||
(
|
||||
@@ -300,7 +183,7 @@ _OFFICIAL_DOCS_PRICING: Dict[tuple[str, str], PricingEntry] = {
|
||||
source_url="https://openai.com/api/pricing/",
|
||||
pricing_version="openai-pricing-2026-03-16",
|
||||
),
|
||||
# ── Anthropic older models (pre-4.5 generation) ────────────────────────
|
||||
# Anthropic older models (pre-4.6 generation)
|
||||
(
|
||||
"anthropic",
|
||||
"claude-3-5-sonnet-20241022",
|
||||
@@ -310,8 +193,8 @@ _OFFICIAL_DOCS_PRICING: Dict[tuple[str, str], PricingEntry] = {
|
||||
cache_read_cost_per_million=Decimal("0.30"),
|
||||
cache_write_cost_per_million=Decimal("3.75"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://platform.claude.com/docs/en/about-claude/pricing",
|
||||
pricing_version="anthropic-pricing-2026-05",
|
||||
source_url="https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching",
|
||||
pricing_version="anthropic-pricing-2026-03-16",
|
||||
),
|
||||
(
|
||||
"anthropic",
|
||||
@@ -322,8 +205,8 @@ _OFFICIAL_DOCS_PRICING: Dict[tuple[str, str], PricingEntry] = {
|
||||
cache_read_cost_per_million=Decimal("0.08"),
|
||||
cache_write_cost_per_million=Decimal("1.00"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://platform.claude.com/docs/en/about-claude/pricing",
|
||||
pricing_version="anthropic-pricing-2026-05",
|
||||
source_url="https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching",
|
||||
pricing_version="anthropic-pricing-2026-03-16",
|
||||
),
|
||||
(
|
||||
"anthropic",
|
||||
@@ -334,8 +217,8 @@ _OFFICIAL_DOCS_PRICING: Dict[tuple[str, str], PricingEntry] = {
|
||||
cache_read_cost_per_million=Decimal("1.50"),
|
||||
cache_write_cost_per_million=Decimal("18.75"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://platform.claude.com/docs/en/about-claude/pricing",
|
||||
pricing_version="anthropic-pricing-2026-05",
|
||||
source_url="https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching",
|
||||
pricing_version="anthropic-pricing-2026-03-16",
|
||||
),
|
||||
(
|
||||
"anthropic",
|
||||
@@ -346,8 +229,8 @@ _OFFICIAL_DOCS_PRICING: Dict[tuple[str, str], PricingEntry] = {
|
||||
cache_read_cost_per_million=Decimal("0.03"),
|
||||
cache_write_cost_per_million=Decimal("0.30"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://platform.claude.com/docs/en/about-claude/pricing",
|
||||
pricing_version="anthropic-pricing-2026-05",
|
||||
source_url="https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching",
|
||||
pricing_version="anthropic-pricing-2026-03-16",
|
||||
),
|
||||
# DeepSeek
|
||||
(
|
||||
@@ -401,99 +284,6 @@ _OFFICIAL_DOCS_PRICING: Dict[tuple[str, str], PricingEntry] = {
|
||||
source_url="https://ai.google.dev/pricing",
|
||||
pricing_version="google-pricing-2026-03-16",
|
||||
),
|
||||
# AWS Bedrock — pricing per the Bedrock pricing page.
|
||||
# Bedrock charges the same per-token rates as the model provider but
|
||||
# through AWS billing. These are the on-demand prices (no commitment).
|
||||
# Source: https://aws.amazon.com/bedrock/pricing/
|
||||
(
|
||||
"bedrock",
|
||||
"anthropic.claude-opus-4-6",
|
||||
): PricingEntry(
|
||||
input_cost_per_million=Decimal("15.00"),
|
||||
output_cost_per_million=Decimal("75.00"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://aws.amazon.com/bedrock/pricing/",
|
||||
pricing_version="bedrock-pricing-2026-04",
|
||||
),
|
||||
(
|
||||
"bedrock",
|
||||
"anthropic.claude-sonnet-4-6",
|
||||
): PricingEntry(
|
||||
input_cost_per_million=Decimal("3.00"),
|
||||
output_cost_per_million=Decimal("15.00"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://aws.amazon.com/bedrock/pricing/",
|
||||
pricing_version="bedrock-pricing-2026-04",
|
||||
),
|
||||
(
|
||||
"bedrock",
|
||||
"anthropic.claude-sonnet-4-5",
|
||||
): PricingEntry(
|
||||
input_cost_per_million=Decimal("3.00"),
|
||||
output_cost_per_million=Decimal("15.00"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://aws.amazon.com/bedrock/pricing/",
|
||||
pricing_version="bedrock-pricing-2026-04",
|
||||
),
|
||||
(
|
||||
"bedrock",
|
||||
"anthropic.claude-haiku-4-5",
|
||||
): PricingEntry(
|
||||
input_cost_per_million=Decimal("0.80"),
|
||||
output_cost_per_million=Decimal("4.00"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://aws.amazon.com/bedrock/pricing/",
|
||||
pricing_version="bedrock-pricing-2026-04",
|
||||
),
|
||||
(
|
||||
"bedrock",
|
||||
"amazon.nova-pro",
|
||||
): PricingEntry(
|
||||
input_cost_per_million=Decimal("0.80"),
|
||||
output_cost_per_million=Decimal("3.20"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://aws.amazon.com/bedrock/pricing/",
|
||||
pricing_version="bedrock-pricing-2026-04",
|
||||
),
|
||||
(
|
||||
"bedrock",
|
||||
"amazon.nova-lite",
|
||||
): PricingEntry(
|
||||
input_cost_per_million=Decimal("0.06"),
|
||||
output_cost_per_million=Decimal("0.24"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://aws.amazon.com/bedrock/pricing/",
|
||||
pricing_version="bedrock-pricing-2026-04",
|
||||
),
|
||||
(
|
||||
"bedrock",
|
||||
"amazon.nova-micro",
|
||||
): PricingEntry(
|
||||
input_cost_per_million=Decimal("0.035"),
|
||||
output_cost_per_million=Decimal("0.14"),
|
||||
source="official_docs_snapshot",
|
||||
source_url="https://aws.amazon.com/bedrock/pricing/",
|
||||
pricing_version="bedrock-pricing-2026-04",
|
||||
),
|
||||
# MiniMax
|
||||
(
|
||||
"minimax",
|
||||
"minimax-m2.7",
|
||||
): PricingEntry(
|
||||
input_cost_per_million=Decimal("0.30"),
|
||||
output_cost_per_million=Decimal("1.20"),
|
||||
source="official_docs_snapshot",
|
||||
pricing_version="minimax-pricing-2026-04",
|
||||
),
|
||||
(
|
||||
"minimax-cn",
|
||||
"minimax-m2.7",
|
||||
): PricingEntry(
|
||||
input_cost_per_million=Decimal("0.30"),
|
||||
output_cost_per_million=Decimal("1.20"),
|
||||
source="official_docs_snapshot",
|
||||
pricing_version="minimax-pricing-2026-04",
|
||||
),
|
||||
}
|
||||
|
||||
|
||||
@@ -529,50 +319,19 @@ def resolve_billing_route(

    if provider_name == "openai-codex":
        return BillingRoute(provider="openai-codex", model=model, base_url=base_url or "", billing_mode="subscription_included")
    if provider_name == "openrouter" or "openrouter.ai" in base:
return BillingRoute(provider="openrouter", model=model, base_url=base_url or "", billing_mode="official_models_api")
|
||||
if provider_name == "anthropic":
|
||||
return BillingRoute(provider="anthropic", model=model.split("/")[-1], base_url=base_url or "", billing_mode="official_docs_snapshot")
|
||||
if provider_name == "openai":
|
||||
return BillingRoute(provider="openai", model=model.split("/")[-1], base_url=base_url or "", billing_mode="official_docs_snapshot")
|
||||
if provider_name in {"minimax", "minimax-cn"}:
|
||||
return BillingRoute(provider=provider_name, model=model.split("/")[-1], base_url=base_url or "", billing_mode="official_docs_snapshot")
|
||||
if provider_name in {"custom", "local"} or (base and "localhost" in base):
|
||||
return BillingRoute(provider=provider_name or "custom", model=model, base_url=base_url or "", billing_mode="unknown")
|
||||
return BillingRoute(provider=provider_name or "unknown", model=model.split("/")[-1] if model else "", base_url=base_url or "", billing_mode="unknown")
|
||||
|
||||
|
||||
def _normalize_anthropic_model_name(model: str) -> str:
|
||||
"""Normalize Anthropic model name variants to canonical form.
|
||||
|
||||
    Handles:
    - Dot notation: claude-opus-4.7 → claude-opus-4-7
    - Strips the anthropic/ prefix if present
"""
|
||||
name = model.lower().strip()
|
||||
if name.startswith("anthropic/"):
|
||||
name = name[len("anthropic/"):]
|
||||
# Normalize dots to dashes in version numbers (e.g. 4.7 → 4-7, 4.6 → 4-6)
|
||||
# But preserve the rest of the name structure
|
||||
name = re.sub(r"(\d+)\.(\d+)", r"\1-\2", name)
|
||||
return name
|
||||
|
||||
|
||||
def _lookup_official_docs_pricing(route: BillingRoute) -> Optional[PricingEntry]:
|
||||
model = route.model.lower()
|
||||
# Direct lookup first
|
||||
entry = _OFFICIAL_DOCS_PRICING.get((route.provider, model))
|
||||
if entry:
|
||||
return entry
|
||||
# Try normalized name for Anthropic (handles dot-notation like opus-4.7)
|
||||
if route.provider == "anthropic":
|
||||
normalized = _normalize_anthropic_model_name(model)
|
||||
if normalized != model:
|
||||
entry = _OFFICIAL_DOCS_PRICING.get((route.provider, normalized))
|
||||
if entry:
|
||||
return entry
|
||||
return None
|
||||


def _openrouter_pricing_entry(route: BillingRoute) -> Optional[PricingEntry]:
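To illustrate the dot-to-dash rewrite that the lookup above relies on, a standalone sketch using the same regex (the helper name here is local to the sketch; the sample inputs are illustrative):

```python
import re

def normalize(name: str) -> str:
    # Same transform as _normalize_anthropic_model_name above: drop the
    # provider prefix, then turn digit.digit version segments into digit-digit.
    name = name.lower().strip()
    if name.startswith("anthropic/"):
        name = name[len("anthropic/"):]
    return re.sub(r"(\d+)\.(\d+)", r"\1-\2", name)

print(normalize("anthropic/claude-opus-4.6"))  # claude-opus-4-6
print(normalize("Claude-Sonnet-4.5"))          # claude-sonnet-4-5
print(normalize("claude-haiku-4-5"))           # claude-haiku-4-5 (already canonical)
```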
@@ -699,22 +458,10 @@ def normalize_usage(
    prompt_total = _to_int(getattr(response_usage, "prompt_tokens", 0))
    output_tokens = _to_int(getattr(response_usage, "completion_tokens", 0))
    details = getattr(response_usage, "prompt_tokens_details", None)
    # Primary: OpenAI-style prompt_tokens_details. Fallback: Anthropic-style
    # top-level fields that some OpenAI-compatible proxies (OpenRouter, Vercel
    # AI Gateway, Cline) expose when routing Claude models — without this
    # fallback, cache writes are undercounted as 0 and cache reads can be
    # missed when the proxy only surfaces them at the top level.
    # Port of cline/cline#10266.
    cache_read_tokens = _to_int(getattr(details, "cached_tokens", 0) if details else 0)
    if not cache_read_tokens:
        cache_read_tokens = _to_int(getattr(response_usage, "cache_read_input_tokens", 0))
    cache_write_tokens = _to_int(
        getattr(details, "cache_write_tokens", 0) if details else 0
    )
    if not cache_write_tokens:
        cache_write_tokens = _to_int(
            getattr(response_usage, "cache_creation_input_tokens", 0)
        )
    input_tokens = max(0, prompt_total - cache_read_tokens - cache_write_tokens)

    reasoning_tokens = 0
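As a worked example of the bucket arithmetic above, a standalone sketch with a hypothetical proxy payload in which the cache figures only appear as Anthropic-style top-level fields:

```python
from types import SimpleNamespace

# Hypothetical usage payload from an OpenAI-compatible proxy routing a Claude
# model: prompt_tokens is the full prompt, prompt_tokens_details is absent,
# and the cache figures only show up as top-level Anthropic-style fields.
usage = SimpleNamespace(
    prompt_tokens=1_000,
    completion_tokens=150,
    prompt_tokens_details=None,
    cache_read_input_tokens=600,
    cache_creation_input_tokens=100,
)

cache_read = usage.cache_read_input_tokens       # 600, found via the fallback
cache_write = usage.cache_creation_input_tokens  # 100, found via the fallback
input_tokens = max(0, usage.prompt_tokens - cache_read - cache_write)
print(input_tokens)  # 300 non-cached input tokens left to bill at the base rate
```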
@@ -828,6 +575,49 @@ def has_known_pricing(
    return entry is not None


def get_pricing(
    model_name: str,
    provider: Optional[str] = None,
    base_url: Optional[str] = None,
    api_key: Optional[str] = None,
) -> Dict[str, float]:
    """Backward-compatible thin wrapper for legacy callers.

    Returns only non-cache input/output fields when a pricing entry exists.
    Unknown routes return zeroes.
    """
    entry = get_pricing_entry(model_name, provider=provider, base_url=base_url, api_key=api_key)
    if not entry:
        return {"input": 0.0, "output": 0.0}
    return {
        "input": float(entry.input_cost_per_million or _ZERO),
        "output": float(entry.output_cost_per_million or _ZERO),
    }


def estimate_cost_usd(
    model: str,
    input_tokens: int,
    output_tokens: int,
    *,
    provider: Optional[str] = None,
    base_url: Optional[str] = None,
    api_key: Optional[str] = None,
) -> float:
    """Backward-compatible helper for legacy callers.

    This uses non-cached input/output only. New code should call
    `estimate_usage_cost()` with canonical usage buckets.
    """
    result = estimate_usage_cost(
        model,
        CanonicalUsage(input_tokens=input_tokens, output_tokens=output_tokens),
        provider=provider,
        base_url=base_url,
        api_key=api_key,
    )
    return float(result.amount_usd or _ZERO)


def format_duration_compact(seconds: float) -> str:
    if seconds < 60:
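To make the Decimal-to-float conversion in `get_pricing` concrete, a standalone sketch with a stub entry (the real `PricingEntry` carries more fields; this mirrors only the two the legacy wrapper reads):

```python
from dataclasses import dataclass
from decimal import Decimal
from typing import Optional

_ZERO = Decimal("0")

@dataclass
class StubEntry:
    # Stands in for PricingEntry; only the fields get_pricing reads.
    input_cost_per_million: Optional[Decimal]
    output_cost_per_million: Optional[Decimal]

entry = StubEntry(Decimal("15.00"), Decimal("75.00"))
legacy = {
    "input": float(entry.input_cost_per_million or _ZERO),
    "output": float(entry.output_cost_per_million or _ZERO),
}
print(legacy)  # {'input': 15.0, 'output': 75.0}; a None field would fall back to 0.0
```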
@@ -1,65 +0,0 @@
# Hermes Agent — Web UI

Browser-based dashboard for managing Hermes Agent configuration and API keys, and for monitoring active sessions.

## Stack

- **Vite** + **React 19** + **TypeScript**
- **Tailwind CSS v4** with custom dark theme
- **shadcn/ui**-style components (hand-rolled, no CLI dependency)

## Development

Install workspace dependencies from the repo root first:

```bash
npm install
```

Start the backend API server from the repo root:

```bash
hermes dashboard --tui --no-open
```

`--tui` exposes the in-browser Chat tab through `/api/pty`. Omit it if you only need the config/session dashboard.

In another terminal, start the Vite dev server:

```bash
cd apps/dashboard
npm run dev
```

The Vite dev server proxies `/api`, `/api/pty`, and `/dashboard-plugins` to `http://127.0.0.1:9119` (the FastAPI backend). It also fetches the backend's `index.html` on each dev page load so the ephemeral session token stays in sync.

If the `hermes` entry point is not installed, use:

```bash
python -m hermes_cli.main dashboard --tui --no-open
```

## Build

```bash
npm run build
```

This outputs to `../../hermes_cli/web_dist/`, which the FastAPI server serves as a static SPA. The built assets are included in the Python package via `pyproject.toml` package-data.
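For orientation, a minimal sketch of how a FastAPI app can serve a built directory such as `web_dist/` as a static SPA (illustrative only; the actual Hermes server wiring, app object, and paths may differ):

```python
from pathlib import Path

from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles

app = FastAPI()

# Serve the built dashboard as a single-page app; html=True makes "/" return index.html.
web_dist = Path(__file__).parent / "web_dist"
app.mount("/", StaticFiles(directory=web_dist, html=True), name="dashboard")
```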

## Structure

```
src/
├── components/ui/   # Reusable UI primitives (Card, Badge, Button, Input, etc.)
├── lib/
│   ├── api.ts       # API client — typed fetch wrappers for all backend endpoints
│   └── utils.ts     # cn() helper for Tailwind class merging
├── pages/
│   ├── StatusPage   # Agent status, active/recent sessions
│   ├── ConfigPage   # Dynamic config editor (reads schema from backend)
│   └── EnvPage      # API key management with save/clear
├── App.tsx          # Main layout and navigation
├── main.tsx         # React entry point
└── index.css        # Tailwind imports and theme variables
```
@@ -1,23 +0,0 @@
import js from '@eslint/js'
import globals from 'globals'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'
import tseslint from 'typescript-eslint'
import { defineConfig, globalIgnores } from 'eslint/config'

export default defineConfig([
  globalIgnores(['dist']),
  {
    files: ['**/*.{ts,tsx}'],
    extends: [
      js.configs.recommended,
      tseslint.configs.recommended,
      reactHooks.configs.flat.recommended,
      reactRefresh.configs.vite,
    ],
    languageOptions: {
      ecmaVersion: 2020,
      globals: globals.browser,
    },
  },
])