mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-05-08 11:47:09 +08:00
Compare commits
41 Commits
feat/head-
...
feat/devex
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f4d7e6a29e | ||
|
|
172a38c344 | ||
|
|
8bc0d4f77d | ||
|
|
8eabdefa8a | ||
|
|
f658af45c2 | ||
|
|
5212644861 | ||
|
|
1151f84351 | ||
|
|
9abd6bf342 | ||
|
|
d2c7ef6b41 | ||
|
|
a34102049b | ||
|
|
ef5d811aba | ||
|
|
2d44ed1c5b | ||
|
|
fa2e72ae9c | ||
|
|
5bfc4ed53b | ||
|
|
520aec20e0 | ||
|
|
64bec1d060 | ||
|
|
ac58309dbd | ||
|
|
a5a5d82a21 | ||
|
|
34e8d088c2 | ||
|
|
c754135965 | ||
|
|
c6b75baad0 | ||
|
|
a7ad6f6d28 | ||
|
|
1a2141d04d | ||
|
|
ff3f3169b2 | ||
|
|
f4580b6010 | ||
|
|
7b63a787b3 | ||
|
|
069570d103 | ||
|
|
0dafdcab86 | ||
|
|
654e16187e | ||
|
|
732c66b0f3 | ||
|
|
1f0944de21 | ||
|
|
f1a1b58319 | ||
|
|
c21d77ca08 | ||
|
|
d6c710706f | ||
|
|
a6d3becd6a | ||
|
|
3b67606c42 | ||
|
|
763c6d104d | ||
|
|
37752ff1ac | ||
|
|
36214d14db | ||
|
|
15561ec425 | ||
|
|
7d79ce92ac |
18
.editorconfig
Normal file
18
.editorconfig
Normal file
@@ -0,0 +1,18 @@
|
||||
root = true
|
||||
|
||||
[*]
|
||||
indent_style = space
|
||||
indent_size = 4
|
||||
end_of_line = lf
|
||||
charset = utf-8
|
||||
trim_trailing_whitespace = true
|
||||
insert_final_newline = true
|
||||
|
||||
[*.{yml,yaml,json,toml}]
|
||||
indent_size = 2
|
||||
|
||||
[*.md]
|
||||
trim_trailing_whitespace = false
|
||||
|
||||
[Makefile]
|
||||
indent_style = tab
|
||||
2
.github/PULL_REQUEST_TEMPLATE.md
vendored
2
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -46,7 +46,7 @@ Fixes #
|
||||
- [ ] My commit messages follow [Conventional Commits](https://www.conventionalcommits.org/) (`fix(scope):`, `feat(scope):`, etc.)
|
||||
- [ ] I searched for [existing PRs](https://github.com/NousResearch/hermes-agent/pulls) to make sure this isn't a duplicate
|
||||
- [ ] My PR contains **only** changes related to this fix/feature (no unrelated commits)
|
||||
- [ ] I've run `pytest tests/ -q` and all tests pass
|
||||
- [ ] I've run `make check` (lint + test) and all checks pass
|
||||
- [ ] I've added tests for my changes (required for bug fixes, strongly encouraged for features)
|
||||
- [ ] I've tested on my platform: <!-- e.g. Ubuntu 24.04, macOS 15.2, Windows 11 -->
|
||||
|
||||
|
||||
41
.github/workflows/tests.yml
vendored
41
.github/workflows/tests.yml
vendored
@@ -1,4 +1,4 @@
|
||||
name: Tests
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
@@ -6,37 +6,42 @@ on:
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
# Cancel in-progress runs for the same PR/branch
|
||||
concurrency:
|
||||
group: tests-${{ github.ref }}
|
||||
group: ci-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
SRC: >-
|
||||
run_agent.py model_tools.py toolsets.py cli.py hermes_state.py batch_runner.py
|
||||
tools/ hermes_cli/ gateway/ agent/ cron/
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 3
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: astral-sh/setup-uv@v5
|
||||
- run: uvx ruff check $SRC
|
||||
- run: uvx ruff format --check $SRC
|
||||
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v5
|
||||
|
||||
- name: Set up Python 3.11
|
||||
run: uv python install 3.11
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
- uses: actions/checkout@v4
|
||||
- uses: astral-sh/setup-uv@v5
|
||||
with:
|
||||
enable-cache: true
|
||||
- run: uv python install 3.11
|
||||
- run: |
|
||||
uv venv .venv --python 3.11
|
||||
source .venv/bin/activate
|
||||
uv pip install -e ".[all,dev]"
|
||||
|
||||
- name: Run tests
|
||||
run: |
|
||||
- run: |
|
||||
source .venv/bin/activate
|
||||
python -m pytest tests/ -q --ignore=tests/integration --tb=short
|
||||
env:
|
||||
# Ensure tests don't accidentally call real APIs
|
||||
OPENROUTER_API_KEY: ""
|
||||
OPENAI_API_KEY: ""
|
||||
NOUS_API_KEY: ""
|
||||
|
||||
80
.gitignore
vendored
80
.gitignore
vendored
@@ -1,51 +1,53 @@
|
||||
/venv/
|
||||
/_pycache/
|
||||
*.pyc*
|
||||
# Python
|
||||
__pycache__/
|
||||
*.pyc
|
||||
*.pyo
|
||||
*.egg-info/
|
||||
dist/
|
||||
build/
|
||||
|
||||
# Environments
|
||||
.venv/
|
||||
venv/
|
||||
|
||||
# Tools
|
||||
.ruff_cache/
|
||||
.mypy_cache/
|
||||
.pytest_cache/
|
||||
|
||||
# Editors
|
||||
.vscode/
|
||||
.idea/
|
||||
|
||||
# Secrets & config
|
||||
.env
|
||||
.env.local
|
||||
.env.development.local
|
||||
.env.test.local
|
||||
.env.production.local
|
||||
.env.development
|
||||
.env.test
|
||||
export*
|
||||
__pycache__/model_tools.cpython-310.pyc
|
||||
__pycache__/web_tools.cpython-310.pyc
|
||||
.env.*.local
|
||||
*.pem
|
||||
*.ppk
|
||||
|
||||
# Node
|
||||
node_modules/
|
||||
|
||||
# Project-specific
|
||||
logs/
|
||||
data/
|
||||
.pytest_cache/
|
||||
tmp/
|
||||
temp_vision_images/
|
||||
hermes-*/*
|
||||
examples/
|
||||
tests/quick_test_dataset.jsonl
|
||||
tests/sample_dataset.jsonl
|
||||
run_datagen_kimik2-thinking.sh
|
||||
run_datagen_megascience_glm4-6.sh
|
||||
run_datagen_sonnet.sh
|
||||
source-data/*
|
||||
run_datagen_megascience_glm4-6.sh
|
||||
data/*
|
||||
node_modules/
|
||||
wandb/
|
||||
images/
|
||||
browser-use/
|
||||
agent-browser/
|
||||
# Private keys
|
||||
*.ppk
|
||||
*.pem
|
||||
privvy*
|
||||
images/
|
||||
__pycache__/
|
||||
hermes_agent.egg-info/
|
||||
wandb/
|
||||
testlogs
|
||||
|
||||
# CLI config (may contain sensitive SSH paths)
|
||||
source-data/
|
||||
testlogs/
|
||||
ignored/
|
||||
.worktrees/
|
||||
temp_vision_images/
|
||||
cli-config.yaml
|
||||
|
||||
# Skills Hub state (lives in ~/.hermes/skills/.hub/ at runtime, but just in case)
|
||||
skills/.hub/
|
||||
ignored/
|
||||
.worktrees/
|
||||
hermes-*/*
|
||||
examples/
|
||||
export*
|
||||
privvy*
|
||||
run_datagen_*.sh
|
||||
tests/quick_test_dataset.jsonl
|
||||
tests/sample_dataset.jsonl
|
||||
|
||||
18
.pre-commit-config.yaml
Normal file
18
.pre-commit-config.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
repos:
|
||||
- repo: https://github.com/astral-sh/ruff-pre-commit
|
||||
rev: v0.15.5
|
||||
hooks:
|
||||
- id: ruff
|
||||
args: [--fix]
|
||||
- id: ruff-format
|
||||
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v5.0.0
|
||||
hooks:
|
||||
- id: trailing-whitespace
|
||||
- id: end-of-file-fixer
|
||||
- id: check-merge-conflict
|
||||
- id: check-yaml
|
||||
args: [--allow-multiple-documents]
|
||||
- id: check-added-large-files
|
||||
args: [--maxkb=500]
|
||||
23
AGENTS.md
23
AGENTS.md
@@ -5,7 +5,8 @@ Instructions for AI coding assistants and developers working on the hermes-agent
|
||||
## Development Environment
|
||||
|
||||
```bash
|
||||
source .venv/bin/activate # ALWAYS activate before running Python
|
||||
make setup # First time: creates .venv, installs deps, sets up pre-commit
|
||||
source .venv/bin/activate
|
||||
```
|
||||
|
||||
## Project Structure
|
||||
@@ -228,15 +229,27 @@ The `_isolate_hermes_home` autouse fixture in `tests/conftest.py` redirects `HER
|
||||
|
||||
---
|
||||
|
||||
## Testing
|
||||
## Development Commands
|
||||
|
||||
```bash
|
||||
make setup # First time: .venv + deps + pre-commit hooks
|
||||
make check # Lint + test (mirrors CI — run before pushing)
|
||||
make lint # Ruff check
|
||||
make fmt # Ruff format + auto-fix
|
||||
make test # Full test suite (~2500 tests, ~2 min)
|
||||
make test-fast # Tests with fail-fast (-x)
|
||||
make test-watch # Rerun tests on file changes
|
||||
make dev-cli # Auto-restart CLI on file changes
|
||||
make dev-gateway # Auto-restart gateway on file changes
|
||||
```
|
||||
|
||||
For targeted testing, use `pytest` directly:
|
||||
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
python -m pytest tests/ -q # Full suite (~2500 tests, ~2 min)
|
||||
python -m pytest tests/test_model_tools.py -q # Toolset resolution
|
||||
python -m pytest tests/test_cli_init.py -q # CLI config loading
|
||||
python -m pytest tests/gateway/ -q # Gateway tests
|
||||
python -m pytest tests/tools/ -q # Tool-level tests
|
||||
```
|
||||
|
||||
Always run the full suite before pushing changes.
|
||||
Formatting is enforced by **ruff** (config in `pyproject.toml`). Pre-commit hooks run on every commit.
|
||||
|
||||
@@ -65,18 +65,7 @@ If your skill is specialized, community-contributed, or niche, it's better suite
|
||||
```bash
|
||||
git clone --recurse-submodules https://github.com/NousResearch/hermes-agent.git
|
||||
cd hermes-agent
|
||||
|
||||
# Create venv with Python 3.11
|
||||
uv venv venv --python 3.11
|
||||
export VIRTUAL_ENV="$(pwd)/venv"
|
||||
|
||||
# Install with all extras (messaging, cron, CLI menus, dev tools)
|
||||
uv pip install -e ".[all,dev]"
|
||||
uv pip install -e "./mini-swe-agent"
|
||||
uv pip install -e "./tinker-atropos"
|
||||
|
||||
# Optional: browser tools
|
||||
npm install
|
||||
make setup # creates .venv, installs all deps
|
||||
```
|
||||
|
||||
### Configure for development
|
||||
@@ -90,22 +79,16 @@ touch ~/.hermes/.env
|
||||
echo 'OPENROUTER_API_KEY=sk-or-v1-your-key' >> ~/.hermes/.env
|
||||
```
|
||||
|
||||
### Run
|
||||
### Common commands
|
||||
|
||||
```bash
|
||||
# Symlink for global access
|
||||
mkdir -p ~/.local/bin
|
||||
ln -sf "$(pwd)/venv/bin/hermes" ~/.local/bin/hermes
|
||||
|
||||
# Verify
|
||||
hermes doctor
|
||||
hermes chat -q "Hello"
|
||||
```
|
||||
|
||||
### Run tests
|
||||
|
||||
```bash
|
||||
pytest tests/ -v
|
||||
make test # run unit tests
|
||||
make lint # ruff check
|
||||
make fmt # ruff format + fix
|
||||
make check # lint + test (same as CI)
|
||||
make dev-cli # auto-restart hermes CLI on file changes
|
||||
make dev-gateway # auto-restart gateway on file changes
|
||||
make test-watch # rerun tests on file changes
|
||||
```
|
||||
|
||||
---
|
||||
@@ -227,7 +210,7 @@ User message → AIAgent._run_agent_loop()
|
||||
|
||||
## Code Style
|
||||
|
||||
- **PEP 8** with practical exceptions (we don't enforce strict line length)
|
||||
- **Formatting**: Enforced by **ruff** (config in `pyproject.toml`). Run `make fmt` to auto-fix, `make lint` to check. Pre-commit hooks handle this automatically.
|
||||
- **Comments**: Only when explaining non-obvious intent, trade-offs, or API quirks. Don't narrate what the code does — `# increment counter` adds nothing
|
||||
- **Error handling**: Catch specific exceptions. Log with `logger.warning()`/`logger.error()` — use `exc_info=True` for unexpected errors so stack traces appear in logs
|
||||
- **Cross-platform**: Never assume Unix. See [Cross-Platform Compatibility](#cross-platform-compatibility)
|
||||
@@ -457,7 +440,7 @@ refactor/description # Code restructuring
|
||||
|
||||
### Before submitting
|
||||
|
||||
1. **Run tests**: `pytest tests/ -v`
|
||||
1. **Run checks**: `make check` (lint + test — same as CI)
|
||||
2. **Test manually**: Run `hermes` and exercise the code path you changed
|
||||
3. **Check cross-platform impact**: If you touch file I/O, process management, or terminal handling, consider Windows and macOS
|
||||
4. **Keep PRs focused**: One logical change per PR. Don't mix a bug fix with a refactor with a new feature.
|
||||
|
||||
69
Makefile
Normal file
69
Makefile
Normal file
@@ -0,0 +1,69 @@
|
||||
.DEFAULT_GOAL := help
|
||||
SHELL := /bin/bash
|
||||
VENV := .venv
|
||||
UV := uv
|
||||
|
||||
SRC := run_agent.py model_tools.py toolsets.py cli.py hermes_state.py batch_runner.py \
|
||||
tools/ hermes_cli/ gateway/ agent/ cron/
|
||||
|
||||
# ─── Setup ──────────────────────────────────────────────────────────────────────
|
||||
|
||||
.PHONY: setup sync clean
|
||||
|
||||
setup: ## Full dev setup (venv + deps + pre-commit)
|
||||
$(UV) venv $(VENV) --python 3.11
|
||||
. $(VENV)/bin/activate && $(UV) pip install -e ".[all,dev]"
|
||||
. $(VENV)/bin/activate && $(UV) pip install -e "./mini-swe-agent"
|
||||
. $(VENV)/bin/activate && pre-commit install
|
||||
@echo "\n✅ Setup complete. Run: source $(VENV)/bin/activate"
|
||||
|
||||
sync: ## Reinstall deps into existing venv
|
||||
. $(VENV)/bin/activate && $(UV) pip install -e ".[all,dev]"
|
||||
|
||||
clean: ## Remove build artifacts and caches
|
||||
rm -rf .ruff_cache .mypy_cache .pytest_cache dist build *.egg-info
|
||||
find . -type d -name __pycache__ -not -path "./.venv/*" -exec rm -rf {} +
|
||||
|
||||
# ─── Quality ────────────────────────────────────────────────────────────────────
|
||||
|
||||
.PHONY: lint fmt check
|
||||
|
||||
lint: ## Check lint + formatting (no changes)
|
||||
. $(VENV)/bin/activate && ruff check $(SRC)
|
||||
. $(VENV)/bin/activate && ruff format --check $(SRC)
|
||||
|
||||
fmt: ## Auto-fix lint + format
|
||||
. $(VENV)/bin/activate && ruff format $(SRC)
|
||||
. $(VENV)/bin/activate && ruff check --fix $(SRC)
|
||||
|
||||
check: lint test ## Lint + test (mirrors CI)
|
||||
|
||||
# ─── Test ───────────────────────────────────────────────────────────────────────
|
||||
|
||||
.PHONY: test test-fast test-watch
|
||||
|
||||
test: ## Run full test suite
|
||||
. $(VENV)/bin/activate && python -m pytest tests/ -q --ignore=tests/integration --tb=short
|
||||
|
||||
test-fast: ## Run tests with fail-fast
|
||||
. $(VENV)/bin/activate && python -m pytest tests/ -q --ignore=tests/integration --tb=short -x
|
||||
|
||||
test-watch: ## Rerun tests on file changes
|
||||
. $(VENV)/bin/activate && python -m watchfiles "python -m pytest tests/ -q --ignore=tests/integration --tb=short -x" $(SRC) tests/
|
||||
|
||||
# ─── Dev Servers ────────────────────────────────────────────────────────────────
|
||||
|
||||
.PHONY: dev-cli dev-gateway
|
||||
|
||||
dev-cli: ## Auto-restart CLI on file changes
|
||||
. $(VENV)/bin/activate && python -m watchfiles "python -m hermes_cli.main" $(SRC)
|
||||
|
||||
dev-gateway: ## Auto-restart gateway on file changes
|
||||
. $(VENV)/bin/activate && python -m watchfiles "python -m gateway.run" $(SRC)
|
||||
|
||||
# ─── Misc ───────────────────────────────────────────────────────────────────────
|
||||
|
||||
.PHONY: help
|
||||
|
||||
help: ## Show this help
|
||||
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[36m%-15s\033[0m %s\n", $$1, $$2}'
|
||||
@@ -95,12 +95,8 @@ Quick start for contributors:
|
||||
```bash
|
||||
git clone --recurse-submodules https://github.com/NousResearch/hermes-agent.git
|
||||
cd hermes-agent
|
||||
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
uv venv .venv --python 3.11
|
||||
source .venv/bin/activate
|
||||
uv pip install -e ".[all,dev]"
|
||||
uv pip install -e "./mini-swe-agent"
|
||||
python -m pytest tests/ -q
|
||||
make setup # creates .venv, installs everything
|
||||
make check # lint + test (same as CI)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
@@ -34,7 +34,7 @@ import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
from types import SimpleNamespace
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
from typing import Any
|
||||
|
||||
from openai import OpenAI
|
||||
|
||||
@@ -43,7 +43,7 @@ from hermes_constants import OPENROUTER_BASE_URL
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Default auxiliary models for direct API-key providers (cheap/fast for side tasks)
|
||||
_API_KEY_PROVIDER_AUX_MODELS: Dict[str, str] = {
|
||||
_API_KEY_PROVIDER_AUX_MODELS: dict[str, str] = {
|
||||
"zai": "glm-4.5-flash",
|
||||
"kimi-coding": "kimi-k2-turbo-preview",
|
||||
"minimax": "MiniMax-M2.5-highspeed",
|
||||
@@ -102,7 +102,7 @@ def _convert_content_for_responses(content: Any) -> Any:
|
||||
if not isinstance(content, list):
|
||||
return str(content) if content else ""
|
||||
|
||||
converted: List[Dict[str, Any]] = []
|
||||
converted: list[dict[str, Any]] = []
|
||||
for part in content:
|
||||
if not isinstance(part, dict):
|
||||
continue
|
||||
@@ -113,7 +113,7 @@ def _convert_content_for_responses(content: Any) -> Any:
|
||||
# chat.completions nests the URL: {"image_url": {"url": "..."}}
|
||||
image_data = part.get("image_url", {})
|
||||
url = image_data.get("url", "") if isinstance(image_data, dict) else str(image_data)
|
||||
entry: Dict[str, Any] = {"type": "input_image", "image_url": url}
|
||||
entry: dict[str, Any] = {"type": "input_image", "image_url": url}
|
||||
# Preserve detail if specified
|
||||
detail = image_data.get("detail") if isinstance(image_data, dict) else None
|
||||
if detail:
|
||||
@@ -148,19 +148,21 @@ class _CodexCompletionsAdapter:
|
||||
# Convert chat.completions multimodal content blocks to Responses
|
||||
# API format (input_text / input_image instead of text / image_url).
|
||||
instructions = "You are a helpful assistant."
|
||||
input_msgs: List[Dict[str, Any]] = []
|
||||
input_msgs: list[dict[str, Any]] = []
|
||||
for msg in messages:
|
||||
role = msg.get("role", "user")
|
||||
content = msg.get("content") or ""
|
||||
if role == "system":
|
||||
instructions = content if isinstance(content, str) else str(content)
|
||||
else:
|
||||
input_msgs.append({
|
||||
"role": role,
|
||||
"content": _convert_content_for_responses(content),
|
||||
})
|
||||
input_msgs.append(
|
||||
{
|
||||
"role": role,
|
||||
"content": _convert_content_for_responses(content),
|
||||
}
|
||||
)
|
||||
|
||||
resp_kwargs: Dict[str, Any] = {
|
||||
resp_kwargs: dict[str, Any] = {
|
||||
"model": model,
|
||||
"instructions": instructions,
|
||||
"input": input_msgs or [{"role": "user", "content": ""}],
|
||||
@@ -179,18 +181,20 @@ class _CodexCompletionsAdapter:
|
||||
name = fn.get("name")
|
||||
if not name:
|
||||
continue
|
||||
converted.append({
|
||||
"type": "function",
|
||||
"name": name,
|
||||
"description": fn.get("description", ""),
|
||||
"parameters": fn.get("parameters", {}),
|
||||
})
|
||||
converted.append(
|
||||
{
|
||||
"type": "function",
|
||||
"name": name,
|
||||
"description": fn.get("description", ""),
|
||||
"parameters": fn.get("parameters", {}),
|
||||
}
|
||||
)
|
||||
if converted:
|
||||
resp_kwargs["tools"] = converted
|
||||
|
||||
# Stream and collect the response
|
||||
text_parts: List[str] = []
|
||||
tool_calls_raw: List[Any] = []
|
||||
text_parts: list[str] = []
|
||||
tool_calls_raw: list[Any] = []
|
||||
usage = None
|
||||
|
||||
try:
|
||||
@@ -208,14 +212,16 @@ class _CodexCompletionsAdapter:
|
||||
if ptype in ("output_text", "text"):
|
||||
text_parts.append(getattr(part, "text", ""))
|
||||
elif item_type == "function_call":
|
||||
tool_calls_raw.append(SimpleNamespace(
|
||||
id=getattr(item, "call_id", ""),
|
||||
type="function",
|
||||
function=SimpleNamespace(
|
||||
name=getattr(item, "name", ""),
|
||||
arguments=getattr(item, "arguments", "{}"),
|
||||
),
|
||||
))
|
||||
tool_calls_raw.append(
|
||||
SimpleNamespace(
|
||||
id=getattr(item, "call_id", ""),
|
||||
type="function",
|
||||
function=SimpleNamespace(
|
||||
name=getattr(item, "name", ""),
|
||||
arguments=getattr(item, "arguments", "{}"),
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
resp_usage = getattr(final, "usage", None)
|
||||
if resp_usage:
|
||||
@@ -285,6 +291,7 @@ class _AsyncCodexCompletionsAdapter:
|
||||
|
||||
async def create(self, **kwargs) -> Any:
|
||||
import asyncio
|
||||
|
||||
return await asyncio.to_thread(self._sync.create, **kwargs)
|
||||
|
||||
|
||||
@@ -304,7 +311,7 @@ class AsyncCodexAuxiliaryClient:
|
||||
self.base_url = sync_wrapper.base_url
|
||||
|
||||
|
||||
def _read_nous_auth() -> Optional[dict]:
|
||||
def _read_nous_auth() -> dict | None:
|
||||
"""Read and validate ~/.hermes/auth.json for an active Nous provider.
|
||||
|
||||
Returns the provider state dict if Nous is active with tokens,
|
||||
@@ -336,10 +343,11 @@ def _nous_base_url() -> str:
|
||||
return os.getenv("NOUS_INFERENCE_BASE_URL", _NOUS_DEFAULT_BASE_URL)
|
||||
|
||||
|
||||
def _read_codex_access_token() -> Optional[str]:
|
||||
def _read_codex_access_token() -> str | None:
|
||||
"""Read a valid Codex OAuth access token from Hermes auth store (~/.hermes/auth.json)."""
|
||||
try:
|
||||
from hermes_cli.auth import _read_codex_tokens
|
||||
|
||||
data = _read_codex_tokens()
|
||||
tokens = data.get("tokens", {})
|
||||
access_token = tokens.get("access_token")
|
||||
@@ -351,7 +359,7 @@ def _read_codex_access_token() -> Optional[str]:
|
||||
return None
|
||||
|
||||
|
||||
def _resolve_api_key_provider() -> Tuple[Optional[OpenAI], Optional[str]]:
|
||||
def _resolve_api_key_provider() -> tuple[OpenAI | None, str | None]:
|
||||
"""Try each API-key provider in PROVIDER_REGISTRY order.
|
||||
|
||||
Returns (client, model) for the first provider whose env var is set,
|
||||
@@ -398,6 +406,7 @@ def _resolve_api_key_provider() -> Tuple[Optional[OpenAI], Optional[str]]:
|
||||
|
||||
# ── Provider resolution helpers ─────────────────────────────────────────────
|
||||
|
||||
|
||||
def _get_auxiliary_provider(task: str = "") -> str:
|
||||
"""Read the provider override for a specific auxiliary task.
|
||||
|
||||
@@ -413,16 +422,15 @@ def _get_auxiliary_provider(task: str = "") -> str:
|
||||
return "auto"
|
||||
|
||||
|
||||
def _try_openrouter() -> Tuple[Optional[OpenAI], Optional[str]]:
|
||||
def _try_openrouter() -> tuple[OpenAI | None, str | None]:
|
||||
or_key = os.getenv("OPENROUTER_API_KEY")
|
||||
if not or_key:
|
||||
return None, None
|
||||
logger.debug("Auxiliary client: OpenRouter")
|
||||
return OpenAI(api_key=or_key, base_url=OPENROUTER_BASE_URL,
|
||||
default_headers=_OR_HEADERS), _OPENROUTER_MODEL
|
||||
return OpenAI(api_key=or_key, base_url=OPENROUTER_BASE_URL, default_headers=_OR_HEADERS), _OPENROUTER_MODEL
|
||||
|
||||
|
||||
def _try_nous() -> Tuple[Optional[OpenAI], Optional[str]]:
|
||||
def _try_nous() -> tuple[OpenAI | None, str | None]:
|
||||
nous = _read_nous_auth()
|
||||
if not nous:
|
||||
return None, None
|
||||
@@ -435,7 +443,7 @@ def _try_nous() -> Tuple[Optional[OpenAI], Optional[str]]:
|
||||
)
|
||||
|
||||
|
||||
def _try_custom_endpoint() -> Tuple[Optional[OpenAI], Optional[str]]:
|
||||
def _try_custom_endpoint() -> tuple[OpenAI | None, str | None]:
|
||||
custom_base = os.getenv("OPENAI_BASE_URL")
|
||||
custom_key = os.getenv("OPENAI_API_KEY")
|
||||
if not custom_base or not custom_key:
|
||||
@@ -445,7 +453,7 @@ def _try_custom_endpoint() -> Tuple[Optional[OpenAI], Optional[str]]:
|
||||
return OpenAI(api_key=custom_key, base_url=custom_base), model
|
||||
|
||||
|
||||
def _try_codex() -> Tuple[Optional[Any], Optional[str]]:
|
||||
def _try_codex() -> tuple[Any | None, str | None]:
|
||||
codex_token = _read_codex_access_token()
|
||||
if not codex_token:
|
||||
return None, None
|
||||
@@ -454,7 +462,7 @@ def _try_codex() -> Tuple[Optional[Any], Optional[str]]:
|
||||
return CodexAuxiliaryClient(real_client, _CODEX_AUX_MODEL), _CODEX_AUX_MODEL
|
||||
|
||||
|
||||
def _resolve_forced_provider(forced: str) -> Tuple[Optional[OpenAI], Optional[str]]:
|
||||
def _resolve_forced_provider(forced: str) -> tuple[OpenAI | None, str | None]:
|
||||
"""Resolve a specific forced provider. Returns (None, None) if creds missing."""
|
||||
if forced == "openrouter":
|
||||
client, model = _try_openrouter()
|
||||
@@ -488,10 +496,9 @@ def _resolve_forced_provider(forced: str) -> Tuple[Optional[OpenAI], Optional[st
|
||||
return None, None
|
||||
|
||||
|
||||
def _resolve_auto() -> Tuple[Optional[OpenAI], Optional[str]]:
|
||||
def _resolve_auto() -> tuple[OpenAI | None, str | None]:
|
||||
"""Full auto-detection chain: OpenRouter → Nous → custom → Codex → API-key → None."""
|
||||
for try_fn in (_try_openrouter, _try_nous, _try_custom_endpoint,
|
||||
_try_codex, _resolve_api_key_provider):
|
||||
for try_fn in (_try_openrouter, _try_nous, _try_custom_endpoint, _try_codex, _resolve_api_key_provider):
|
||||
client, model = try_fn()
|
||||
if client is not None:
|
||||
return client, model
|
||||
@@ -501,7 +508,8 @@ def _resolve_auto() -> Tuple[Optional[OpenAI], Optional[str]]:
|
||||
|
||||
# ── Public API ──────────────────────────────────────────────────────────────
|
||||
|
||||
def get_text_auxiliary_client(task: str = "") -> Tuple[Optional[OpenAI], Optional[str]]:
|
||||
|
||||
def get_text_auxiliary_client(task: str = "") -> tuple[OpenAI | None, str | None]:
|
||||
"""Return (client, default_model_slug) for text-only auxiliary tasks.
|
||||
|
||||
Args:
|
||||
@@ -544,7 +552,7 @@ def get_async_text_auxiliary_client(task: str = ""):
|
||||
return AsyncOpenAI(**async_kwargs), model
|
||||
|
||||
|
||||
def get_vision_auxiliary_client() -> Tuple[Optional[OpenAI], Optional[str]]:
|
||||
def get_vision_auxiliary_client() -> tuple[OpenAI | None, str | None]:
|
||||
"""Return (client, default_model_slug) for vision/multimodal auxiliary tasks.
|
||||
|
||||
Checks AUXILIARY_VISION_PROVIDER for a forced provider, otherwise
|
||||
@@ -560,18 +568,21 @@ def get_vision_auxiliary_client() -> Tuple[Optional[OpenAI], Optional[str]]:
|
||||
forced = _get_auxiliary_provider("vision")
|
||||
if forced != "auto":
|
||||
return _resolve_forced_provider(forced)
|
||||
# Auto: only multimodal-capable providers
|
||||
for try_fn in (_try_openrouter, _try_nous, _try_codex):
|
||||
# Auto: try providers known to support multimodal first, then fall
|
||||
# back to the user's custom endpoint. Many local models (Qwen-VL,
|
||||
# LLaVA, Pixtral, etc.) support vision — skipping them entirely
|
||||
# caused silent failures for local-only users.
|
||||
for try_fn in (_try_openrouter, _try_nous, _try_codex, _try_custom_endpoint):
|
||||
client, model = try_fn()
|
||||
if client is not None:
|
||||
return client, model
|
||||
logger.debug("Auxiliary vision client: none available (auto only tries OpenRouter/Nous/Codex)")
|
||||
logger.debug("Auxiliary vision client: none available")
|
||||
return None, None
|
||||
|
||||
|
||||
def get_auxiliary_extra_body() -> dict:
|
||||
"""Return extra_body kwargs for auxiliary API calls.
|
||||
|
||||
|
||||
Includes Nous Portal product tags when the auxiliary client is backed
|
||||
by Nous Portal. Returns empty dict otherwise.
|
||||
"""
|
||||
@@ -580,7 +591,7 @@ def get_auxiliary_extra_body() -> dict:
|
||||
|
||||
def auxiliary_max_tokens_param(value: int) -> dict:
|
||||
"""Return the correct max tokens kwarg for the auxiliary client's provider.
|
||||
|
||||
|
||||
OpenRouter and local models use 'max_tokens'. Direct OpenAI with newer
|
||||
models (gpt-4o, o-series, gpt-5+) requires 'max_completion_tokens'.
|
||||
The Codex adapter translates max_tokens internally, so we use max_tokens
|
||||
@@ -589,8 +600,6 @@ def auxiliary_max_tokens_param(value: int) -> dict:
|
||||
custom_base = os.getenv("OPENAI_BASE_URL", "")
|
||||
or_key = os.getenv("OPENROUTER_API_KEY")
|
||||
# Only use max_completion_tokens for direct OpenAI custom endpoints
|
||||
if (not or_key
|
||||
and _read_nous_auth() is None
|
||||
and "api.openai.com" in custom_base.lower()):
|
||||
if not or_key and _read_nous_auth() is None and "api.openai.com" in custom_base.lower():
|
||||
return {"max_completion_tokens": value}
|
||||
return {"max_tokens": value}
|
||||
|
||||
@@ -7,12 +7,12 @@ protecting head and tail context.
|
||||
|
||||
import logging
|
||||
import os
|
||||
from typing import Any, Dict, List, Optional
|
||||
from typing import Any
|
||||
|
||||
from agent.auxiliary_client import get_text_auxiliary_client
|
||||
from agent.model_metadata import (
|
||||
get_model_context_length,
|
||||
estimate_messages_tokens_rough,
|
||||
get_model_context_length,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -56,7 +56,7 @@ class ContextCompressor:
|
||||
self.client, default_model = get_text_auxiliary_client("compression")
|
||||
self.summary_model = summary_model_override or default_model
|
||||
|
||||
def update_from_response(self, usage: Dict[str, Any]):
|
||||
def update_from_response(self, usage: dict[str, Any]):
|
||||
"""Update tracked token usage from API response."""
|
||||
self.last_prompt_tokens = usage.get("prompt_tokens", 0)
|
||||
self.last_completion_tokens = usage.get("completion_tokens", 0)
|
||||
@@ -67,12 +67,12 @@ class ContextCompressor:
|
||||
tokens = prompt_tokens if prompt_tokens is not None else self.last_prompt_tokens
|
||||
return tokens >= self.threshold_tokens
|
||||
|
||||
def should_compress_preflight(self, messages: List[Dict[str, Any]]) -> bool:
|
||||
def should_compress_preflight(self, messages: list[dict[str, Any]]) -> bool:
|
||||
"""Quick pre-flight check using rough estimate (before API call)."""
|
||||
rough_estimate = estimate_messages_tokens_rough(messages)
|
||||
return rough_estimate >= self.threshold_tokens
|
||||
|
||||
def get_status(self) -> Dict[str, Any]:
|
||||
def get_status(self) -> dict[str, Any]:
|
||||
"""Get current compression status for display/logging."""
|
||||
return {
|
||||
"last_prompt_tokens": self.last_prompt_tokens,
|
||||
@@ -82,7 +82,7 @@ class ContextCompressor:
|
||||
"compression_count": self.compression_count,
|
||||
}
|
||||
|
||||
def _generate_summary(self, turns_to_summarize: List[Dict[str, Any]]) -> Optional[str]:
|
||||
def _generate_summary(self, turns_to_summarize: list[dict[str, Any]]) -> str | None:
|
||||
"""Generate a concise summary of conversation turns.
|
||||
|
||||
Tries the auxiliary model first, then falls back to the user's main
|
||||
@@ -140,7 +140,9 @@ Write only the summary, starting with "[CONTEXT SUMMARY]:" prefix."""
|
||||
logging.warning(f"Main model summary also failed: {fallback_err}")
|
||||
|
||||
# 3. All models failed — return None so the caller drops turns without a summary
|
||||
logging.warning("Context compression: no model available for summary. Middle turns will be dropped without summary.")
|
||||
logging.warning(
|
||||
"Context compression: no model available for summary. Middle turns will be dropped without summary."
|
||||
)
|
||||
return None
|
||||
|
||||
def _call_summary_model(self, client, model: str, prompt: str) -> str:
|
||||
@@ -186,12 +188,14 @@ Write only the summary, starting with "[CONTEXT SUMMARY]:" prefix."""
|
||||
|
||||
# Don't fallback to the same provider that just failed
|
||||
from hermes_constants import OPENROUTER_BASE_URL
|
||||
|
||||
if custom_base.rstrip("/") == OPENROUTER_BASE_URL.rstrip("/"):
|
||||
return None, None
|
||||
|
||||
model = os.getenv("LLM_MODEL") or os.getenv("OPENAI_MODEL") or self.model
|
||||
try:
|
||||
from openai import OpenAI as _OpenAI
|
||||
|
||||
client = _OpenAI(api_key=custom_key, base_url=custom_base)
|
||||
logger.debug("Built fallback auxiliary client: %s via %s", model, custom_base)
|
||||
return client, model
|
||||
@@ -210,7 +214,7 @@ Write only the summary, starting with "[CONTEXT SUMMARY]:" prefix."""
|
||||
return tc.get("id", "")
|
||||
return getattr(tc, "id", "") or ""
|
||||
|
||||
def _sanitize_tool_pairs(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
||||
def _sanitize_tool_pairs(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
|
||||
"""Fix orphaned tool_call / tool_result pairs after compression.
|
||||
|
||||
Two failure modes:
|
||||
@@ -243,8 +247,7 @@ Write only the summary, starting with "[CONTEXT SUMMARY]:" prefix."""
|
||||
orphaned_results = result_call_ids - surviving_call_ids
|
||||
if orphaned_results:
|
||||
messages = [
|
||||
m for m in messages
|
||||
if not (m.get("role") == "tool" and m.get("tool_call_id") in orphaned_results)
|
||||
m for m in messages if not (m.get("role") == "tool" and m.get("tool_call_id") in orphaned_results)
|
||||
]
|
||||
if not self.quiet_mode:
|
||||
logger.info("Compression sanitizer: removed %d orphaned tool result(s)", len(orphaned_results))
|
||||
@@ -252,25 +255,27 @@ Write only the summary, starting with "[CONTEXT SUMMARY]:" prefix."""
|
||||
# 2. Add stub results for assistant tool_calls whose results were dropped
|
||||
missing_results = surviving_call_ids - result_call_ids
|
||||
if missing_results:
|
||||
patched: List[Dict[str, Any]] = []
|
||||
patched: list[dict[str, Any]] = []
|
||||
for msg in messages:
|
||||
patched.append(msg)
|
||||
if msg.get("role") == "assistant":
|
||||
for tc in msg.get("tool_calls") or []:
|
||||
cid = self._get_tool_call_id(tc)
|
||||
if cid in missing_results:
|
||||
patched.append({
|
||||
"role": "tool",
|
||||
"content": "[Result from earlier conversation — see context summary above]",
|
||||
"tool_call_id": cid,
|
||||
})
|
||||
patched.append(
|
||||
{
|
||||
"role": "tool",
|
||||
"content": "[Result from earlier conversation — see context summary above]",
|
||||
"tool_call_id": cid,
|
||||
}
|
||||
)
|
||||
messages = patched
|
||||
if not self.quiet_mode:
|
||||
logger.info("Compression sanitizer: added %d stub tool result(s)", len(missing_results))
|
||||
|
||||
return messages
|
||||
|
||||
def _align_boundary_forward(self, messages: List[Dict[str, Any]], idx: int) -> int:
|
||||
def _align_boundary_forward(self, messages: list[dict[str, Any]], idx: int) -> int:
|
||||
"""Push a compress-start boundary forward past any orphan tool results.
|
||||
|
||||
If ``messages[idx]`` is a tool result, slide forward until we hit a
|
||||
@@ -280,7 +285,7 @@ Write only the summary, starting with "[CONTEXT SUMMARY]:" prefix."""
|
||||
idx += 1
|
||||
return idx
|
||||
|
||||
def _align_boundary_backward(self, messages: List[Dict[str, Any]], idx: int) -> int:
|
||||
def _align_boundary_backward(self, messages: list[dict[str, Any]], idx: int) -> int:
|
||||
"""Pull a compress-end boundary backward to avoid splitting a
|
||||
tool_call / result group.
|
||||
|
||||
@@ -298,7 +303,7 @@ Write only the summary, starting with "[CONTEXT SUMMARY]:" prefix."""
|
||||
idx -= 1
|
||||
return idx
|
||||
|
||||
def compress(self, messages: List[Dict[str, Any]], current_tokens: int = None) -> List[Dict[str, Any]]:
|
||||
def compress(self, messages: list[dict[str, Any]], current_tokens: int = None) -> list[dict[str, Any]]:
|
||||
"""Compress conversation messages by summarizing middle turns.
|
||||
|
||||
Keeps first N + last N turns, summarizes everything in between.
|
||||
@@ -308,7 +313,9 @@ Write only the summary, starting with "[CONTEXT SUMMARY]:" prefix."""
|
||||
n_messages = len(messages)
|
||||
if n_messages <= self.protect_first_n + self.protect_last_n + 1:
|
||||
if not self.quiet_mode:
|
||||
print(f"⚠️ Cannot compress: only {n_messages} messages (need > {self.protect_first_n + self.protect_last_n + 1})")
|
||||
print(
|
||||
f"⚠️ Cannot compress: only {n_messages} messages (need > {self.protect_first_n + self.protect_last_n + 1})"
|
||||
)
|
||||
return messages
|
||||
|
||||
compress_start = self.protect_first_n
|
||||
@@ -323,14 +330,20 @@ Write only the summary, starting with "[CONTEXT SUMMARY]:" prefix."""
|
||||
return messages
|
||||
|
||||
turns_to_summarize = messages[compress_start:compress_end]
|
||||
display_tokens = current_tokens if current_tokens else self.last_prompt_tokens or estimate_messages_tokens_rough(messages)
|
||||
display_tokens = (
|
||||
current_tokens if current_tokens else self.last_prompt_tokens or estimate_messages_tokens_rough(messages)
|
||||
)
|
||||
|
||||
if not self.quiet_mode:
|
||||
print(f"\n📦 Context compression triggered ({display_tokens:,} tokens ≥ {self.threshold_tokens:,} threshold)")
|
||||
print(f" 📊 Model context limit: {self.context_length:,} tokens ({self.threshold_percent*100:.0f}% = {self.threshold_tokens:,})")
|
||||
print(
|
||||
f"\n📦 Context compression triggered ({display_tokens:,} tokens ≥ {self.threshold_tokens:,} threshold)"
|
||||
)
|
||||
print(
|
||||
f" 📊 Model context limit: {self.context_length:,} tokens ({self.threshold_percent * 100:.0f}% = {self.threshold_tokens:,})"
|
||||
)
|
||||
|
||||
if not self.quiet_mode:
|
||||
print(f" 🗜️ Summarizing turns {compress_start+1}-{compress_end} ({len(turns_to_summarize)} turns)")
|
||||
print(f" 🗜️ Summarizing turns {compress_start + 1}-{compress_end} ({len(turns_to_summarize)} turns)")
|
||||
|
||||
summary = self._generate_summary(turns_to_summarize)
|
||||
|
||||
@@ -338,7 +351,9 @@ Write only the summary, starting with "[CONTEXT SUMMARY]:" prefix."""
|
||||
for i in range(compress_start):
|
||||
msg = messages[i].copy()
|
||||
if i == 0 and msg.get("role") == "system" and self.compression_count == 0:
|
||||
msg["content"] = (msg.get("content") or "") + "\n\n[Note: Some earlier conversation turns may be summarized to preserve context space.]"
|
||||
msg["content"] = (
|
||||
msg.get("content") or ""
|
||||
) + "\n\n[Note: Some earlier conversation turns may be summarized to preserve context space.]"
|
||||
compressed.append(msg)
|
||||
|
||||
if summary:
|
||||
|
||||
282
agent/display.py
282
agent/display.py
@@ -6,7 +6,6 @@ Used by AIAgent._execute_tool_calls for CLI feedback.
|
||||
|
||||
import json
|
||||
import os
|
||||
import random
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
@@ -20,19 +19,31 @@ _RESET = "\033[0m"
|
||||
# Tool preview (one-line summary of a tool call's primary argument)
|
||||
# =========================================================================
|
||||
|
||||
|
||||
def build_tool_preview(tool_name: str, args: dict, max_len: int = 40) -> str:
|
||||
"""Build a short preview of a tool call's primary argument for display."""
|
||||
primary_args = {
|
||||
"terminal": "command", "web_search": "query", "web_extract": "urls",
|
||||
"read_file": "path", "write_file": "path", "patch": "path",
|
||||
"search_files": "pattern", "browser_navigate": "url",
|
||||
"browser_click": "ref", "browser_type": "text",
|
||||
"image_generate": "prompt", "text_to_speech": "text",
|
||||
"vision_analyze": "question", "mixture_of_agents": "user_prompt",
|
||||
"skill_view": "name", "skills_list": "category",
|
||||
"terminal": "command",
|
||||
"web_search": "query",
|
||||
"web_extract": "urls",
|
||||
"read_file": "path",
|
||||
"write_file": "path",
|
||||
"patch": "path",
|
||||
"search_files": "pattern",
|
||||
"browser_navigate": "url",
|
||||
"browser_click": "ref",
|
||||
"browser_type": "text",
|
||||
"image_generate": "prompt",
|
||||
"text_to_speech": "text",
|
||||
"vision_analyze": "question",
|
||||
"mixture_of_agents": "user_prompt",
|
||||
"skill_view": "name",
|
||||
"skills_list": "category",
|
||||
"schedule_cronjob": "name",
|
||||
"execute_code": "code", "delegate_task": "goal",
|
||||
"clarify": "question", "skill_manage": "name",
|
||||
"execute_code": "code",
|
||||
"delegate_task": "goal",
|
||||
"clarify": "question",
|
||||
"skill_manage": "name",
|
||||
}
|
||||
|
||||
if tool_name == "process":
|
||||
@@ -61,18 +72,18 @@ def build_tool_preview(tool_name: str, args: dict, max_len: int = 40) -> str:
|
||||
|
||||
if tool_name == "session_search":
|
||||
query = args.get("query", "")
|
||||
return f"recall: \"{query[:25]}{'...' if len(query) > 25 else ''}\""
|
||||
return f'recall: "{query[:25]}{"..." if len(query) > 25 else ""}"'
|
||||
|
||||
if tool_name == "memory":
|
||||
action = args.get("action", "")
|
||||
target = args.get("target", "")
|
||||
if action == "add":
|
||||
content = args.get("content", "")
|
||||
return f"+{target}: \"{content[:25]}{'...' if len(content) > 25 else ''}\""
|
||||
return f'+{target}: "{content[:25]}{"..." if len(content) > 25 else ""}"'
|
||||
elif action == "replace":
|
||||
return f"~{target}: \"{args.get('old_text', '')[:20]}\""
|
||||
return f'~{target}: "{args.get("old_text", "")[:20]}"'
|
||||
elif action == "remove":
|
||||
return f"-{target}: \"{args.get('old_text', '')[:20]}\""
|
||||
return f'-{target}: "{args.get("old_text", "")[:20]}"'
|
||||
return action
|
||||
|
||||
if tool_name == "send_message":
|
||||
@@ -80,7 +91,7 @@ def build_tool_preview(tool_name: str, args: dict, max_len: int = 40) -> str:
|
||||
msg = args.get("message", "")
|
||||
if len(msg) > 20:
|
||||
msg = msg[:17] + "..."
|
||||
return f"to {target}: \"{msg}\""
|
||||
return f'to {target}: "{msg}"'
|
||||
|
||||
if tool_name.startswith("rl_"):
|
||||
rl_previews = {
|
||||
@@ -115,7 +126,7 @@ def build_tool_preview(tool_name: str, args: dict, max_len: int = 40) -> str:
|
||||
if not preview:
|
||||
return None
|
||||
if len(preview) > max_len:
|
||||
preview = preview[:max_len - 3] + "..."
|
||||
preview = preview[: max_len - 3] + "..."
|
||||
return preview
|
||||
|
||||
|
||||
@@ -123,41 +134,74 @@ def build_tool_preview(tool_name: str, args: dict, max_len: int = 40) -> str:
|
||||
# KawaiiSpinner
|
||||
# =========================================================================
|
||||
|
||||
|
||||
class KawaiiSpinner:
|
||||
"""Animated spinner with kawaii faces for CLI feedback during tool execution."""
|
||||
|
||||
SPINNERS = {
|
||||
'dots': ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'],
|
||||
'bounce': ['⠁', '⠂', '⠄', '⡀', '⢀', '⠠', '⠐', '⠈'],
|
||||
'grow': ['▁', '▂', '▃', '▄', '▅', '▆', '▇', '█', '▇', '▆', '▅', '▄', '▃', '▂'],
|
||||
'arrows': ['←', '↖', '↑', '↗', '→', '↘', '↓', '↙'],
|
||||
'star': ['✶', '✷', '✸', '✹', '✺', '✹', '✸', '✷'],
|
||||
'moon': ['🌑', '🌒', '🌓', '🌔', '🌕', '🌖', '🌗', '🌘'],
|
||||
'pulse': ['◜', '◠', '◝', '◞', '◡', '◟'],
|
||||
'brain': ['🧠', '💭', '💡', '✨', '💫', '🌟', '💡', '💭'],
|
||||
'sparkle': ['⁺', '˚', '*', '✧', '✦', '✧', '*', '˚'],
|
||||
"dots": ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"],
|
||||
"bounce": ["⠁", "⠂", "⠄", "⡀", "⢀", "⠠", "⠐", "⠈"],
|
||||
"grow": ["▁", "▂", "▃", "▄", "▅", "▆", "▇", "█", "▇", "▆", "▅", "▄", "▃", "▂"],
|
||||
"arrows": ["←", "↖", "↑", "↗", "→", "↘", "↓", "↙"],
|
||||
"star": ["✶", "✷", "✸", "✹", "✺", "✹", "✸", "✷"],
|
||||
"moon": ["🌑", "🌒", "🌓", "🌔", "🌕", "🌖", "🌗", "🌘"],
|
||||
"pulse": ["◜", "◠", "◝", "◞", "◡", "◟"],
|
||||
"brain": ["🧠", "💭", "💡", "✨", "💫", "🌟", "💡", "💭"],
|
||||
"sparkle": ["⁺", "˚", "*", "✧", "✦", "✧", "*", "˚"],
|
||||
}
|
||||
|
||||
KAWAII_WAITING = [
|
||||
"(。◕‿◕。)", "(◕‿◕✿)", "٩(◕‿◕。)۶", "(✿◠‿◠)", "( ˘▽˘)っ",
|
||||
"♪(´ε` )", "(◕ᴗ◕✿)", "ヾ(^∇^)", "(≧◡≦)", "(★ω★)",
|
||||
"(。◕‿◕。)",
|
||||
"(◕‿◕✿)",
|
||||
"٩(◕‿◕。)۶",
|
||||
"(✿◠‿◠)",
|
||||
"( ˘▽˘)っ",
|
||||
"♪(´ε` )",
|
||||
"(◕ᴗ◕✿)",
|
||||
"ヾ(^∇^)",
|
||||
"(≧◡≦)",
|
||||
"(★ω★)",
|
||||
]
|
||||
|
||||
KAWAII_THINKING = [
|
||||
"(。•́︿•̀。)", "(◔_◔)", "(¬‿¬)", "( •_•)>⌐■-■", "(⌐■_■)",
|
||||
"(´・_・`)", "◉_◉", "(°ロ°)", "( ˘⌣˘)♡", "ヽ(>∀<☆)☆",
|
||||
"٩(๑❛ᴗ❛๑)۶", "(⊙_⊙)", "(¬_¬)", "( ͡° ͜ʖ ͡°)", "ಠ_ಠ",
|
||||
"(。•́︿•̀。)",
|
||||
"(◔_◔)",
|
||||
"(¬‿¬)",
|
||||
"( •_•)>⌐■-■",
|
||||
"(⌐■_■)",
|
||||
"(´・_・`)",
|
||||
"◉_◉",
|
||||
"(°ロ°)",
|
||||
"( ˘⌣˘)♡",
|
||||
"ヽ(>∀<☆)☆",
|
||||
"٩(๑❛ᴗ❛๑)۶",
|
||||
"(⊙_⊙)",
|
||||
"(¬_¬)",
|
||||
"( ͡° ͜ʖ ͡°)",
|
||||
"ಠ_ಠ",
|
||||
]
|
||||
|
||||
THINKING_VERBS = [
|
||||
"pondering", "contemplating", "musing", "cogitating", "ruminating",
|
||||
"deliberating", "mulling", "reflecting", "processing", "reasoning",
|
||||
"analyzing", "computing", "synthesizing", "formulating", "brainstorming",
|
||||
"pondering",
|
||||
"contemplating",
|
||||
"musing",
|
||||
"cogitating",
|
||||
"ruminating",
|
||||
"deliberating",
|
||||
"mulling",
|
||||
"reflecting",
|
||||
"processing",
|
||||
"reasoning",
|
||||
"analyzing",
|
||||
"computing",
|
||||
"synthesizing",
|
||||
"formulating",
|
||||
"brainstorming",
|
||||
]
|
||||
|
||||
def __init__(self, message: str = "", spinner_type: str = 'dots'):
|
||||
def __init__(self, message: str = "", spinner_type: str = "dots"):
|
||||
self.message = message
|
||||
self.spinner_frames = self.SPINNERS.get(spinner_type, self.SPINNERS['dots'])
|
||||
self.spinner_frames = self.SPINNERS.get(spinner_type, self.SPINNERS["dots"])
|
||||
self.running = False
|
||||
self.thread = None
|
||||
self.frame_idx = 0
|
||||
@@ -167,7 +211,7 @@ class KawaiiSpinner:
|
||||
# child agents can replace sys.stdout with a black hole.
|
||||
self._out = sys.stdout
|
||||
|
||||
def _write(self, text: str, end: str = '\n', flush: bool = False):
|
||||
def _write(self, text: str, end: str = "\n", flush: bool = False):
|
||||
"""Write to the stdout captured at spinner creation time."""
|
||||
try:
|
||||
self._out.write(text + end)
|
||||
@@ -185,7 +229,7 @@ class KawaiiSpinner:
|
||||
elapsed = time.time() - self.start_time
|
||||
line = f" {frame} {self.message} ({elapsed:.1f}s)"
|
||||
pad = max(self.last_line_len - len(line), 0)
|
||||
self._write(f"\r{line}{' ' * pad}", end='', flush=True)
|
||||
self._write(f"\r{line}{' ' * pad}", end="", flush=True)
|
||||
self.last_line_len = len(line)
|
||||
self.frame_idx += 1
|
||||
time.sleep(0.12)
|
||||
@@ -216,7 +260,7 @@ class KawaiiSpinner:
|
||||
# Clear spinner line with spaces (not \033[K) to avoid garbled escape
|
||||
# codes when prompt_toolkit's patch_stdout is active — same approach
|
||||
# as stop(). Then print text; spinner redraws on next tick.
|
||||
blanks = ' ' * max(self.last_line_len + 5, 40)
|
||||
blanks = " " * max(self.last_line_len + 5, 40)
|
||||
self._write(f"\r{blanks}\r {text}", flush=True)
|
||||
|
||||
def stop(self, final_message: str = None):
|
||||
@@ -225,8 +269,8 @@ class KawaiiSpinner:
|
||||
self.thread.join(timeout=0.5)
|
||||
# Clear the spinner line with spaces instead of \033[K to avoid
|
||||
# garbled escape codes when prompt_toolkit's patch_stdout is active.
|
||||
blanks = ' ' * max(self.last_line_len + 5, 40)
|
||||
self._write(f"\r{blanks}\r", end='', flush=True)
|
||||
blanks = " " * max(self.last_line_len + 5, 40)
|
||||
self._write(f"\r{blanks}\r", end="", flush=True)
|
||||
if final_message:
|
||||
self._write(f" {final_message}", flush=True)
|
||||
|
||||
@@ -244,38 +288,110 @@ class KawaiiSpinner:
|
||||
# =========================================================================
|
||||
|
||||
KAWAII_SEARCH = [
|
||||
"♪(´ε` )", "(。◕‿◕。)", "ヾ(^∇^)", "(◕ᴗ◕✿)", "( ˘▽˘)っ",
|
||||
"٩(◕‿◕。)۶", "(✿◠‿◠)", "♪~(´ε` )", "(ノ´ヮ`)ノ*:・゚✧", "\(◎o◎)/",
|
||||
"♪(´ε` )",
|
||||
"(。◕‿◕。)",
|
||||
"ヾ(^∇^)",
|
||||
"(◕ᴗ◕✿)",
|
||||
"( ˘▽˘)っ",
|
||||
"٩(◕‿◕。)۶",
|
||||
"(✿◠‿◠)",
|
||||
"♪~(´ε` )",
|
||||
"(ノ´ヮ`)ノ*:・゚✧",
|
||||
"\(◎o◎)/",
|
||||
]
|
||||
KAWAII_READ = [
|
||||
"φ(゜▽゜*)♪", "( ˘▽˘)っ", "(⌐■_■)", "٩(。•́‿•̀。)۶", "(◕‿◕✿)",
|
||||
"ヾ(@⌒ー⌒@)ノ", "(✧ω✧)", "♪(๑ᴖ◡ᴖ๑)♪", "(≧◡≦)", "( ´ ▽ ` )ノ",
|
||||
"φ(゜▽゜*)♪",
|
||||
"( ˘▽˘)っ",
|
||||
"(⌐■_■)",
|
||||
"٩(。•́‿•̀。)۶",
|
||||
"(◕‿◕✿)",
|
||||
"ヾ(@⌒ー⌒@)ノ",
|
||||
"(✧ω✧)",
|
||||
"♪(๑ᴖ◡ᴖ๑)♪",
|
||||
"(≧◡≦)",
|
||||
"( ´ ▽ ` )ノ",
|
||||
]
|
||||
KAWAII_TERMINAL = [
|
||||
"ヽ(>∀<☆)ノ", "(ノ°∀°)ノ", "٩(^ᴗ^)۶", "ヾ(⌐■_■)ノ♪", "(•̀ᴗ•́)و",
|
||||
"┗(^0^)┓", "(`・ω・´)", "\( ̄▽ ̄)/", "(ง •̀_•́)ง", "ヽ(´▽`)/",
|
||||
"ヽ(>∀<☆)ノ",
|
||||
"(ノ°∀°)ノ",
|
||||
"٩(^ᴗ^)۶",
|
||||
"ヾ(⌐■_■)ノ♪",
|
||||
"(•̀ᴗ•́)و",
|
||||
"┗(^0^)┓",
|
||||
"(`・ω・´)",
|
||||
"\( ̄▽ ̄)/",
|
||||
"(ง •̀_•́)ง",
|
||||
"ヽ(´▽`)/",
|
||||
]
|
||||
KAWAII_BROWSER = [
|
||||
"(ノ°∀°)ノ", "(☞゚ヮ゚)☞", "( ͡° ͜ʖ ͡°)", "┌( ಠ_ಠ)┘", "(⊙_⊙)?",
|
||||
"ヾ(•ω•`)o", "( ̄ω ̄)", "( ˇωˇ )", "(ᵔᴥᵔ)", "\(◎o◎)/",
|
||||
"(ノ°∀°)ノ",
|
||||
"(☞゚ヮ゚)☞",
|
||||
"( ͡° ͜ʖ ͡°)",
|
||||
"┌( ಠ_ಠ)┘",
|
||||
"(⊙_⊙)?",
|
||||
"ヾ(•ω•`)o",
|
||||
"( ̄ω ̄)",
|
||||
"( ˇωˇ )",
|
||||
"(ᵔᴥᵔ)",
|
||||
"\(◎o◎)/",
|
||||
]
|
||||
KAWAII_CREATE = [
|
||||
"✧*。٩(ˊᗜˋ*)و✧", "(ノ◕ヮ◕)ノ*:・゚✧", "ヽ(>∀<☆)ノ", "٩(♡ε♡)۶", "(◕‿◕)♡",
|
||||
"✿◕ ‿ ◕✿", "(*≧▽≦)", "ヾ(^-^)ノ", "(☆▽☆)", "°˖✧◝(⁰▿⁰)◜✧˖°",
|
||||
"✧*。٩(ˊᗜˋ*)و✧",
|
||||
"(ノ◕ヮ◕)ノ*:・゚✧",
|
||||
"ヽ(>∀<☆)ノ",
|
||||
"٩(♡ε♡)۶",
|
||||
"(◕‿◕)♡",
|
||||
"✿◕ ‿ ◕✿",
|
||||
"(*≧▽≦)",
|
||||
"ヾ(^-^)ノ",
|
||||
"(☆▽☆)",
|
||||
"°˖✧◝(⁰▿⁰)◜✧˖°",
|
||||
]
|
||||
KAWAII_SKILL = [
|
||||
"ヾ(@⌒ー⌒@)ノ", "(๑˃ᴗ˂)ﻭ", "٩(◕‿◕。)۶", "(✿╹◡╹)", "ヽ(・∀・)ノ",
|
||||
"(ノ´ヮ`)ノ*:・゚✧", "♪(๑ᴖ◡ᴖ๑)♪", "(◠‿◠)", "٩(ˊᗜˋ*)و", "(^▽^)",
|
||||
"ヾ(^∇^)", "(★ω★)/", "٩(。•́‿•̀。)۶", "(◕ᴗ◕✿)", "\(◎o◎)/",
|
||||
"(✧ω✧)", "ヽ(>∀<☆)ノ", "( ˘▽˘)っ", "(≧◡≦) ♡", "ヾ( ̄▽ ̄)",
|
||||
"ヾ(@⌒ー⌒@)ノ",
|
||||
"(๑˃ᴗ˂)ﻭ",
|
||||
"٩(◕‿◕。)۶",
|
||||
"(✿╹◡╹)",
|
||||
"ヽ(・∀・)ノ",
|
||||
"(ノ´ヮ`)ノ*:・゚✧",
|
||||
"♪(๑ᴖ◡ᴖ๑)♪",
|
||||
"(◠‿◠)",
|
||||
"٩(ˊᗜˋ*)و",
|
||||
"(^▽^)",
|
||||
"ヾ(^∇^)",
|
||||
"(★ω★)/",
|
||||
"٩(。•́‿•̀。)۶",
|
||||
"(◕ᴗ◕✿)",
|
||||
"\(◎o◎)/",
|
||||
"(✧ω✧)",
|
||||
"ヽ(>∀<☆)ノ",
|
||||
"( ˘▽˘)っ",
|
||||
"(≧◡≦) ♡",
|
||||
"ヾ( ̄▽ ̄)",
|
||||
]
|
||||
KAWAII_THINK = [
|
||||
"(っ°Д°;)っ", "(;′⌒`)", "(・_・ヾ", "( ´_ゝ`)", "( ̄ヘ ̄)",
|
||||
"(。-`ω´-)", "( ˘︹˘ )", "(¬_¬)", "ヽ(ー_ー )ノ", "(;一_一)",
|
||||
"(っ°Д°;)っ",
|
||||
"(;′⌒`)",
|
||||
"(・_・ヾ",
|
||||
"( ´_ゝ`)",
|
||||
"( ̄ヘ ̄)",
|
||||
"(。-`ω´-)",
|
||||
"( ˘︹˘ )",
|
||||
"(¬_¬)",
|
||||
"ヽ(ー_ー )ノ",
|
||||
"(;一_一)",
|
||||
]
|
||||
KAWAII_GENERIC = [
|
||||
"♪(´ε` )", "(◕‿◕✿)", "ヾ(^∇^)", "٩(◕‿◕。)۶", "(✿◠‿◠)",
|
||||
"(ノ´ヮ`)ノ*:・゚✧", "ヽ(>∀<☆)ノ", "(☆▽☆)", "( ˘▽˘)っ", "(≧◡≦)",
|
||||
"♪(´ε` )",
|
||||
"(◕‿◕✿)",
|
||||
"ヾ(^∇^)",
|
||||
"٩(◕‿◕。)۶",
|
||||
"(✿◠‿◠)",
|
||||
"(ノ´ヮ`)ノ*:・゚✧",
|
||||
"ヽ(>∀<☆)ノ",
|
||||
"(☆▽☆)",
|
||||
"( ˘▽˘)っ",
|
||||
"(≧◡≦)",
|
||||
]
|
||||
|
||||
|
||||
@@ -283,6 +399,7 @@ KAWAII_GENERIC = [
|
||||
# Cute tool message (completion line that replaces the spinner)
|
||||
# =========================================================================
|
||||
|
||||
|
||||
def _detect_tool_failure(tool_name: str, result: str | None) -> tuple[bool, str]:
|
||||
"""Inspect a tool result string for signs of failure.
|
||||
|
||||
@@ -321,7 +438,10 @@ def _detect_tool_failure(tool_name: str, result: str | None) -> tuple[bool, str]
|
||||
|
||||
|
||||
def get_cute_tool_message(
|
||||
tool_name: str, args: dict, duration: float, result: str | None = None,
|
||||
tool_name: str,
|
||||
args: dict,
|
||||
duration: float,
|
||||
result: str | None = None,
|
||||
) -> str:
|
||||
"""Generate a formatted tool completion line for CLI quiet mode.
|
||||
|
||||
@@ -335,11 +455,11 @@ def get_cute_tool_message(
|
||||
|
||||
def _trunc(s, n=40):
|
||||
s = str(s)
|
||||
return (s[:n-3] + "...") if len(s) > n else s
|
||||
return (s[: n - 3] + "...") if len(s) > n else s
|
||||
|
||||
def _path(p, n=35):
|
||||
p = str(p)
|
||||
return ("..." + p[-(n-3):]) if len(p) > n else p
|
||||
return ("..." + p[-(n - 3) :]) if len(p) > n else p
|
||||
|
||||
def _wrap(line: str) -> str:
|
||||
"""Append failure suffix when the tool failed."""
|
||||
@@ -354,7 +474,7 @@ def get_cute_tool_message(
|
||||
if urls:
|
||||
url = urls[0] if isinstance(urls, list) else str(urls)
|
||||
domain = url.replace("https://", "").replace("http://", "").split("/")[0]
|
||||
extra = f" +{len(urls)-1}" if len(urls) > 1 else ""
|
||||
extra = f" +{len(urls) - 1}" if len(urls) > 1 else ""
|
||||
return _wrap(f"┊ 📄 fetch {_trunc(domain, 35)}{extra} {dur}")
|
||||
return _wrap(f"┊ 📄 fetch pages {dur}")
|
||||
if tool_name == "web_crawl":
|
||||
@@ -366,8 +486,15 @@ def get_cute_tool_message(
|
||||
if tool_name == "process":
|
||||
action = args.get("action", "?")
|
||||
sid = args.get("session_id", "")[:12]
|
||||
labels = {"list": "ls processes", "poll": f"poll {sid}", "log": f"log {sid}",
|
||||
"wait": f"wait {sid}", "kill": f"kill {sid}", "write": f"write {sid}", "submit": f"submit {sid}"}
|
||||
labels = {
|
||||
"list": "ls processes",
|
||||
"poll": f"poll {sid}",
|
||||
"log": f"log {sid}",
|
||||
"wait": f"wait {sid}",
|
||||
"kill": f"kill {sid}",
|
||||
"write": f"write {sid}",
|
||||
"submit": f"submit {sid}",
|
||||
}
|
||||
return _wrap(f"┊ ⚙️ proc {labels.get(action, f'{action} {sid}')} {dur}")
|
||||
if tool_name == "read_file":
|
||||
return _wrap(f"┊ 📖 read {_path(args.get('path', ''))} {dur}")
|
||||
@@ -390,7 +517,7 @@ def get_cute_tool_message(
|
||||
if tool_name == "browser_click":
|
||||
return _wrap(f"┊ 👆 click {args.get('ref', '?')} {dur}")
|
||||
if tool_name == "browser_type":
|
||||
return _wrap(f"┊ ⌨️ type \"{_trunc(args.get('text', ''), 30)}\" {dur}")
|
||||
return _wrap(f'┊ ⌨️ type "{_trunc(args.get("text", ""), 30)}" {dur}')
|
||||
if tool_name == "browser_scroll":
|
||||
d = args.get("direction", "down")
|
||||
arrow = {"down": "↓", "up": "↑", "right": "→", "left": "←"}.get(d, "↓")
|
||||
@@ -415,16 +542,16 @@ def get_cute_tool_message(
|
||||
else:
|
||||
return _wrap(f"┊ 📋 plan {len(todos_arg)} task(s) {dur}")
|
||||
if tool_name == "session_search":
|
||||
return _wrap(f"┊ 🔍 recall \"{_trunc(args.get('query', ''), 35)}\" {dur}")
|
||||
return _wrap(f'┊ 🔍 recall "{_trunc(args.get("query", ""), 35)}" {dur}')
|
||||
if tool_name == "memory":
|
||||
action = args.get("action", "?")
|
||||
target = args.get("target", "")
|
||||
if action == "add":
|
||||
return _wrap(f"┊ 🧠 memory +{target}: \"{_trunc(args.get('content', ''), 30)}\" {dur}")
|
||||
return _wrap(f'┊ 🧠 memory +{target}: "{_trunc(args.get("content", ""), 30)}" {dur}')
|
||||
elif action == "replace":
|
||||
return _wrap(f"┊ 🧠 memory ~{target}: \"{_trunc(args.get('old_text', ''), 20)}\" {dur}")
|
||||
return _wrap(f'┊ 🧠 memory ~{target}: "{_trunc(args.get("old_text", ""), 20)}" {dur}')
|
||||
elif action == "remove":
|
||||
return _wrap(f"┊ 🧠 memory -{target}: \"{_trunc(args.get('old_text', ''), 20)}\" {dur}")
|
||||
return _wrap(f'┊ 🧠 memory -{target}: "{_trunc(args.get("old_text", ""), 20)}" {dur}')
|
||||
return _wrap(f"┊ 🧠 memory {action} {dur}")
|
||||
if tool_name == "skills_list":
|
||||
return _wrap(f"┊ 📚 skills list {args.get('category', 'all')} {dur}")
|
||||
@@ -439,7 +566,7 @@ def get_cute_tool_message(
|
||||
if tool_name == "mixture_of_agents":
|
||||
return _wrap(f"┊ 🧠 reason {_trunc(args.get('user_prompt', ''), 30)} {dur}")
|
||||
if tool_name == "send_message":
|
||||
return _wrap(f"┊ 📨 send {args.get('target', '?')}: \"{_trunc(args.get('message', ''), 25)}\" {dur}")
|
||||
return _wrap(f'┊ 📨 send {args.get("target", "?")}: "{_trunc(args.get("message", ""), 25)}" {dur}')
|
||||
if tool_name == "schedule_cronjob":
|
||||
return _wrap(f"┊ ⏰ schedule {_trunc(args.get('name', args.get('prompt', 'task')), 30)} {dur}")
|
||||
if tool_name == "list_cronjobs":
|
||||
@@ -448,11 +575,16 @@ def get_cute_tool_message(
|
||||
return _wrap(f"┊ ⏰ remove job {args.get('job_id', '?')} {dur}")
|
||||
if tool_name.startswith("rl_"):
|
||||
rl = {
|
||||
"rl_list_environments": "list envs", "rl_select_environment": f"select {args.get('name', '')}",
|
||||
"rl_get_current_config": "get config", "rl_edit_config": f"set {args.get('field', '?')}",
|
||||
"rl_start_training": "start training", "rl_check_status": f"status {args.get('run_id', '?')[:12]}",
|
||||
"rl_stop_training": f"stop {args.get('run_id', '?')[:12]}", "rl_get_results": f"results {args.get('run_id', '?')[:12]}",
|
||||
"rl_list_runs": "list runs", "rl_test_inference": "test inference",
|
||||
"rl_list_environments": "list envs",
|
||||
"rl_select_environment": f"select {args.get('name', '')}",
|
||||
"rl_get_current_config": "get config",
|
||||
"rl_edit_config": f"set {args.get('field', '?')}",
|
||||
"rl_start_training": "start training",
|
||||
"rl_check_status": f"status {args.get('run_id', '?')[:12]}",
|
||||
"rl_stop_training": f"stop {args.get('run_id', '?')[:12]}",
|
||||
"rl_get_results": f"results {args.get('run_id', '?')[:12]}",
|
||||
"rl_list_runs": "list runs",
|
||||
"rl_test_inference": "test inference",
|
||||
}
|
||||
return _wrap(f"┊ 🧪 rl {rl.get(tool_name, tool_name.replace('rl_', ''))} {dur}")
|
||||
if tool_name == "execute_code":
|
||||
|
||||
@@ -20,7 +20,7 @@ import json
|
||||
import time
|
||||
from collections import Counter, defaultdict
|
||||
from datetime import datetime
|
||||
from typing import Any, Dict, List, Optional
|
||||
from typing import Any
|
||||
|
||||
# =========================================================================
|
||||
# Model pricing (USD per million tokens) — approximate as of early 2026
|
||||
@@ -81,7 +81,7 @@ def _has_known_pricing(model_name: str) -> bool:
|
||||
return _get_pricing(model_name) is not _DEFAULT_PRICING
|
||||
|
||||
|
||||
def _get_pricing(model_name: str) -> Dict[str, float]:
|
||||
def _get_pricing(model_name: str) -> dict[str, float]:
|
||||
"""Look up pricing for a model. Uses fuzzy matching on model name.
|
||||
|
||||
Returns _DEFAULT_PRICING (zero cost) for unknown/custom models —
|
||||
@@ -150,7 +150,7 @@ def _format_duration(seconds: float) -> str:
|
||||
return f"{days:.1f}d"
|
||||
|
||||
|
||||
def _bar_chart(values: List[int], max_width: int = 20) -> List[str]:
|
||||
def _bar_chart(values: list[int], max_width: int = 20) -> list[str]:
|
||||
"""Create simple horizontal bar chart strings from values."""
|
||||
peak = max(values) if values else 1
|
||||
if peak == 0:
|
||||
@@ -176,7 +176,7 @@ class InsightsEngine:
|
||||
self.db = db
|
||||
self._conn = db._conn
|
||||
|
||||
def generate(self, days: int = 30, source: str = None) -> Dict[str, Any]:
|
||||
def generate(self, days: int = 30, source: str = None) -> dict[str, Any]:
|
||||
"""
|
||||
Generate a complete insights report.
|
||||
|
||||
@@ -233,10 +233,11 @@ class InsightsEngine:
|
||||
# =========================================================================
|
||||
|
||||
# Columns we actually need (skip system_prompt, model_config blobs)
|
||||
_SESSION_COLS = ("id, source, model, started_at, ended_at, "
|
||||
"message_count, tool_call_count, input_tokens, output_tokens")
|
||||
_SESSION_COLS = (
|
||||
"id, source, model, started_at, ended_at, message_count, tool_call_count, input_tokens, output_tokens"
|
||||
)
|
||||
|
||||
def _get_sessions(self, cutoff: float, source: str = None) -> List[Dict]:
|
||||
def _get_sessions(self, cutoff: float, source: str = None) -> list[dict]:
|
||||
"""Fetch sessions within the time window."""
|
||||
if source:
|
||||
cursor = self._conn.execute(
|
||||
@@ -254,7 +255,7 @@ class InsightsEngine:
|
||||
)
|
||||
return [dict(row) for row in cursor.fetchall()]
|
||||
|
||||
def _get_tool_usage(self, cutoff: float, source: str = None) -> List[Dict]:
|
||||
def _get_tool_usage(self, cutoff: float, source: str = None) -> list[dict]:
|
||||
"""Get tool call counts from messages.
|
||||
|
||||
Uses two sources:
|
||||
@@ -341,12 +342,9 @@ class InsightsEngine:
|
||||
tool_counts = merged
|
||||
|
||||
# Convert to the expected format
|
||||
return [
|
||||
{"tool_name": name, "count": count}
|
||||
for name, count in tool_counts.most_common()
|
||||
]
|
||||
return [{"tool_name": name, "count": count} for name, count in tool_counts.most_common()]
|
||||
|
||||
def _get_message_stats(self, cutoff: float, source: str = None) -> Dict:
|
||||
def _get_message_stats(self, cutoff: float, source: str = None) -> dict:
|
||||
"""Get aggregate message statistics."""
|
||||
if source:
|
||||
cursor = self._conn.execute(
|
||||
@@ -373,16 +371,22 @@ class InsightsEngine:
|
||||
(cutoff,),
|
||||
)
|
||||
row = cursor.fetchone()
|
||||
return dict(row) if row else {
|
||||
"total_messages": 0, "user_messages": 0,
|
||||
"assistant_messages": 0, "tool_messages": 0,
|
||||
}
|
||||
return (
|
||||
dict(row)
|
||||
if row
|
||||
else {
|
||||
"total_messages": 0,
|
||||
"user_messages": 0,
|
||||
"assistant_messages": 0,
|
||||
"tool_messages": 0,
|
||||
}
|
||||
)
|
||||
|
||||
# =========================================================================
|
||||
# Computation
|
||||
# =========================================================================
|
||||
|
||||
def _compute_overview(self, sessions: List[Dict], message_stats: Dict) -> Dict:
|
||||
def _compute_overview(self, sessions: list[dict], message_stats: dict) -> dict:
|
||||
"""Compute high-level overview statistics."""
|
||||
total_input = sum(s.get("input_tokens") or 0 for s in sessions)
|
||||
total_output = sum(s.get("output_tokens") or 0 for s in sessions)
|
||||
@@ -442,12 +446,18 @@ class InsightsEngine:
|
||||
"models_without_pricing": sorted(models_without_pricing),
|
||||
}
|
||||
|
||||
def _compute_model_breakdown(self, sessions: List[Dict]) -> List[Dict]:
|
||||
def _compute_model_breakdown(self, sessions: list[dict]) -> list[dict]:
|
||||
"""Break down usage by model."""
|
||||
model_data = defaultdict(lambda: {
|
||||
"sessions": 0, "input_tokens": 0, "output_tokens": 0,
|
||||
"total_tokens": 0, "tool_calls": 0, "cost": 0.0,
|
||||
})
|
||||
model_data = defaultdict(
|
||||
lambda: {
|
||||
"sessions": 0,
|
||||
"input_tokens": 0,
|
||||
"output_tokens": 0,
|
||||
"total_tokens": 0,
|
||||
"tool_calls": 0,
|
||||
"cost": 0.0,
|
||||
}
|
||||
)
|
||||
|
||||
for s in sessions:
|
||||
model = s.get("model") or "unknown"
|
||||
@@ -464,20 +474,23 @@ class InsightsEngine:
|
||||
d["cost"] += _estimate_cost(model, inp, out)
|
||||
d["has_pricing"] = _has_known_pricing(model)
|
||||
|
||||
result = [
|
||||
{"model": model, **data}
|
||||
for model, data in model_data.items()
|
||||
]
|
||||
result = [{"model": model, **data} for model, data in model_data.items()]
|
||||
# Sort by tokens first, fall back to session count when tokens are 0
|
||||
result.sort(key=lambda x: (x["total_tokens"], x["sessions"]), reverse=True)
|
||||
return result
|
||||
|
||||
def _compute_platform_breakdown(self, sessions: List[Dict]) -> List[Dict]:
|
||||
def _compute_platform_breakdown(self, sessions: list[dict]) -> list[dict]:
|
||||
"""Break down usage by platform/source."""
|
||||
platform_data = defaultdict(lambda: {
|
||||
"sessions": 0, "messages": 0, "input_tokens": 0,
|
||||
"output_tokens": 0, "total_tokens": 0, "tool_calls": 0,
|
||||
})
|
||||
platform_data = defaultdict(
|
||||
lambda: {
|
||||
"sessions": 0,
|
||||
"messages": 0,
|
||||
"input_tokens": 0,
|
||||
"output_tokens": 0,
|
||||
"total_tokens": 0,
|
||||
"tool_calls": 0,
|
||||
}
|
||||
)
|
||||
|
||||
for s in sessions:
|
||||
source = s.get("source") or "unknown"
|
||||
@@ -491,27 +504,26 @@ class InsightsEngine:
|
||||
d["total_tokens"] += inp + out
|
||||
d["tool_calls"] += s.get("tool_call_count") or 0
|
||||
|
||||
result = [
|
||||
{"platform": platform, **data}
|
||||
for platform, data in platform_data.items()
|
||||
]
|
||||
result = [{"platform": platform, **data} for platform, data in platform_data.items()]
|
||||
result.sort(key=lambda x: x["sessions"], reverse=True)
|
||||
return result
|
||||
|
||||
def _compute_tool_breakdown(self, tool_usage: List[Dict]) -> List[Dict]:
|
||||
def _compute_tool_breakdown(self, tool_usage: list[dict]) -> list[dict]:
|
||||
"""Process tool usage data into a ranked list with percentages."""
|
||||
total_calls = sum(t["count"] for t in tool_usage) if tool_usage else 0
|
||||
result = []
|
||||
for t in tool_usage:
|
||||
pct = (t["count"] / total_calls * 100) if total_calls else 0
|
||||
result.append({
|
||||
"tool": t["tool_name"],
|
||||
"count": t["count"],
|
||||
"percentage": pct,
|
||||
})
|
||||
result.append(
|
||||
{
|
||||
"tool": t["tool_name"],
|
||||
"count": t["count"],
|
||||
"percentage": pct,
|
||||
}
|
||||
)
|
||||
return result
|
||||
|
||||
def _compute_activity_patterns(self, sessions: List[Dict]) -> Dict:
|
||||
def _compute_activity_patterns(self, sessions: list[dict]) -> dict:
|
||||
"""Analyze activity patterns by day of week and hour."""
|
||||
day_counts = Counter() # 0=Monday ... 6=Sunday
|
||||
hour_counts = Counter()
|
||||
@@ -527,15 +539,9 @@ class InsightsEngine:
|
||||
daily_counts[dt.strftime("%Y-%m-%d")] += 1
|
||||
|
||||
day_names = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
|
||||
day_breakdown = [
|
||||
{"day": day_names[i], "count": day_counts.get(i, 0)}
|
||||
for i in range(7)
|
||||
]
|
||||
day_breakdown = [{"day": day_names[i], "count": day_counts.get(i, 0)} for i in range(7)]
|
||||
|
||||
hour_breakdown = [
|
||||
{"hour": i, "count": hour_counts.get(i, 0)}
|
||||
for i in range(24)
|
||||
]
|
||||
hour_breakdown = [{"hour": i, "count": hour_counts.get(i, 0)} for i in range(24)]
|
||||
|
||||
# Busiest day and hour
|
||||
busiest_day = max(day_breakdown, key=lambda x: x["count"]) if day_breakdown else None
|
||||
@@ -569,37 +575,40 @@ class InsightsEngine:
|
||||
"max_streak": max_streak,
|
||||
}
|
||||
|
||||
def _compute_top_sessions(self, sessions: List[Dict]) -> List[Dict]:
|
||||
def _compute_top_sessions(self, sessions: list[dict]) -> list[dict]:
|
||||
"""Find notable sessions (longest, most messages, most tokens)."""
|
||||
top = []
|
||||
|
||||
# Longest by duration
|
||||
sessions_with_duration = [
|
||||
s for s in sessions
|
||||
if s.get("started_at") and s.get("ended_at")
|
||||
]
|
||||
sessions_with_duration = [s for s in sessions if s.get("started_at") and s.get("ended_at")]
|
||||
if sessions_with_duration:
|
||||
longest = max(
|
||||
sessions_with_duration,
|
||||
key=lambda s: (s["ended_at"] - s["started_at"]),
|
||||
key=lambda s: s["ended_at"] - s["started_at"],
|
||||
)
|
||||
dur = longest["ended_at"] - longest["started_at"]
|
||||
top.append({
|
||||
"label": "Longest session",
|
||||
"session_id": longest["id"][:16],
|
||||
"value": _format_duration(dur),
|
||||
"date": datetime.fromtimestamp(longest["started_at"]).strftime("%b %d"),
|
||||
})
|
||||
top.append(
|
||||
{
|
||||
"label": "Longest session",
|
||||
"session_id": longest["id"][:16],
|
||||
"value": _format_duration(dur),
|
||||
"date": datetime.fromtimestamp(longest["started_at"]).strftime("%b %d"),
|
||||
}
|
||||
)
|
||||
|
||||
# Most messages
|
||||
most_msgs = max(sessions, key=lambda s: s.get("message_count") or 0)
|
||||
if (most_msgs.get("message_count") or 0) > 0:
|
||||
top.append({
|
||||
"label": "Most messages",
|
||||
"session_id": most_msgs["id"][:16],
|
||||
"value": f"{most_msgs['message_count']} msgs",
|
||||
"date": datetime.fromtimestamp(most_msgs["started_at"]).strftime("%b %d") if most_msgs.get("started_at") else "?",
|
||||
})
|
||||
top.append(
|
||||
{
|
||||
"label": "Most messages",
|
||||
"session_id": most_msgs["id"][:16],
|
||||
"value": f"{most_msgs['message_count']} msgs",
|
||||
"date": datetime.fromtimestamp(most_msgs["started_at"]).strftime("%b %d")
|
||||
if most_msgs.get("started_at")
|
||||
else "?",
|
||||
}
|
||||
)
|
||||
|
||||
# Most tokens
|
||||
most_tokens = max(
|
||||
@@ -608,22 +617,30 @@ class InsightsEngine:
|
||||
)
|
||||
token_total = (most_tokens.get("input_tokens") or 0) + (most_tokens.get("output_tokens") or 0)
|
||||
if token_total > 0:
|
||||
top.append({
|
||||
"label": "Most tokens",
|
||||
"session_id": most_tokens["id"][:16],
|
||||
"value": f"{token_total:,} tokens",
|
||||
"date": datetime.fromtimestamp(most_tokens["started_at"]).strftime("%b %d") if most_tokens.get("started_at") else "?",
|
||||
})
|
||||
top.append(
|
||||
{
|
||||
"label": "Most tokens",
|
||||
"session_id": most_tokens["id"][:16],
|
||||
"value": f"{token_total:,} tokens",
|
||||
"date": datetime.fromtimestamp(most_tokens["started_at"]).strftime("%b %d")
|
||||
if most_tokens.get("started_at")
|
||||
else "?",
|
||||
}
|
||||
)
|
||||
|
||||
# Most tool calls
|
||||
most_tools = max(sessions, key=lambda s: s.get("tool_call_count") or 0)
|
||||
if (most_tools.get("tool_call_count") or 0) > 0:
|
||||
top.append({
|
||||
"label": "Most tool calls",
|
||||
"session_id": most_tools["id"][:16],
|
||||
"value": f"{most_tools['tool_call_count']} calls",
|
||||
"date": datetime.fromtimestamp(most_tools["started_at"]).strftime("%b %d") if most_tools.get("started_at") else "?",
|
||||
})
|
||||
top.append(
|
||||
{
|
||||
"label": "Most tool calls",
|
||||
"session_id": most_tools["id"][:16],
|
||||
"value": f"{most_tools['tool_call_count']} calls",
|
||||
"date": datetime.fromtimestamp(most_tools["started_at"]).strftime("%b %d")
|
||||
if most_tools.get("started_at")
|
||||
else "?",
|
||||
}
|
||||
)
|
||||
|
||||
return top
|
||||
|
||||
@@ -631,7 +648,7 @@ class InsightsEngine:
|
||||
# Formatting
|
||||
# =========================================================================
|
||||
|
||||
def format_terminal(self, report: Dict) -> str:
|
||||
def format_terminal(self, report: dict) -> str:
|
||||
"""Format the insights report for terminal display (CLI)."""
|
||||
if report.get("empty"):
|
||||
days = report.get("days", 30)
|
||||
@@ -669,13 +686,17 @@ class InsightsEngine:
|
||||
lines.append(" " + "─" * 56)
|
||||
lines.append(f" Sessions: {o['total_sessions']:<12} Messages: {o['total_messages']:,}")
|
||||
lines.append(f" Tool calls: {o['total_tool_calls']:<12,} User messages: {o['user_messages']:,}")
|
||||
lines.append(f" Input tokens: {o['total_input_tokens']:<12,} Output tokens: {o['total_output_tokens']:,}")
|
||||
lines.append(
|
||||
f" Input tokens: {o['total_input_tokens']:<12,} Output tokens: {o['total_output_tokens']:,}"
|
||||
)
|
||||
cost_str = f"${o['estimated_cost']:.2f}"
|
||||
if o.get("models_without_pricing"):
|
||||
cost_str += " *"
|
||||
lines.append(f" Total tokens: {o['total_tokens']:<12,} Est. cost: {cost_str}")
|
||||
if o["total_hours"] > 0:
|
||||
lines.append(f" Active time: ~{_format_duration(o['total_hours'] * 3600):<11} Avg session: ~{_format_duration(o['avg_session_duration'])}")
|
||||
lines.append(
|
||||
f" Active time: ~{_format_duration(o['total_hours'] * 3600):<11} Avg session: ~{_format_duration(o['avg_session_duration'])}"
|
||||
)
|
||||
lines.append(f" Avg msgs/session: {o['avg_messages_per_session']:.1f}")
|
||||
lines.append("")
|
||||
|
||||
@@ -692,7 +713,7 @@ class InsightsEngine:
|
||||
cost_cell = " N/A"
|
||||
lines.append(f" {model_name:<30} {m['sessions']:>8} {m['total_tokens']:>12,} {cost_cell}")
|
||||
if o.get("models_without_pricing"):
|
||||
lines.append(f" * Cost N/A for custom/self-hosted models")
|
||||
lines.append(" * Cost N/A for custom/self-hosted models")
|
||||
lines.append("")
|
||||
|
||||
# Platform breakdown
|
||||
@@ -758,7 +779,7 @@ class InsightsEngine:
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
def format_gateway(self, report: Dict) -> str:
|
||||
def format_gateway(self, report: dict) -> str:
|
||||
"""Format the insights report for gateway/messaging (shorter)."""
|
||||
if report.get("empty"):
|
||||
days = report.get("days", 30)
|
||||
@@ -771,14 +792,20 @@ class InsightsEngine:
|
||||
lines.append(f"📊 **Hermes Insights** — Last {days} days\n")
|
||||
|
||||
# Overview
|
||||
lines.append(f"**Sessions:** {o['total_sessions']} | **Messages:** {o['total_messages']:,} | **Tool calls:** {o['total_tool_calls']:,}")
|
||||
lines.append(f"**Tokens:** {o['total_tokens']:,} (in: {o['total_input_tokens']:,} / out: {o['total_output_tokens']:,})")
|
||||
lines.append(
|
||||
f"**Sessions:** {o['total_sessions']} | **Messages:** {o['total_messages']:,} | **Tool calls:** {o['total_tool_calls']:,}"
|
||||
)
|
||||
lines.append(
|
||||
f"**Tokens:** {o['total_tokens']:,} (in: {o['total_input_tokens']:,} / out: {o['total_output_tokens']:,})"
|
||||
)
|
||||
cost_note = ""
|
||||
if o.get("models_without_pricing"):
|
||||
cost_note = " _(excludes custom/self-hosted models)_"
|
||||
lines.append(f"**Est. cost:** ${o['estimated_cost']:.2f}{cost_note}")
|
||||
if o["total_hours"] > 0:
|
||||
lines.append(f"**Active time:** ~{_format_duration(o['total_hours'] * 3600)} | **Avg session:** ~{_format_duration(o['avg_session_duration'])}")
|
||||
lines.append(
|
||||
f"**Active time:** ~{_format_duration(o['total_hours'] * 3600)} | **Avg session:** ~{_format_duration(o['avg_session_duration'])}"
|
||||
)
|
||||
lines.append("")
|
||||
|
||||
# Models (top 5)
|
||||
@@ -786,7 +813,9 @@ class InsightsEngine:
|
||||
lines.append("**🤖 Models:**")
|
||||
for m in report["models"][:5]:
|
||||
cost_str = f"${m['cost']:.2f}" if m.get("has_pricing") else "N/A"
|
||||
lines.append(f" {m['model'][:25]} — {m['sessions']} sessions, {m['total_tokens']:,} tokens, {cost_str}")
|
||||
lines.append(
|
||||
f" {m['model'][:25]} — {m['sessions']} sessions, {m['total_tokens']:,} tokens, {cost_str}"
|
||||
)
|
||||
lines.append("")
|
||||
|
||||
# Platforms (if multi-platform)
|
||||
@@ -809,9 +838,13 @@ class InsightsEngine:
|
||||
hr = act["busiest_hour"]["hour"]
|
||||
ampm = "AM" if hr < 12 else "PM"
|
||||
display_hr = hr % 12 or 12
|
||||
lines.append(f"**📅 Busiest:** {act['busiest_day']['day']}s ({act['busiest_day']['count']} sessions), {display_hr}{ampm} ({act['busiest_hour']['count']} sessions)")
|
||||
lines.append(
|
||||
f"**📅 Busiest:** {act['busiest_day']['day']}s ({act['busiest_day']['count']} sessions), {display_hr}{ampm} ({act['busiest_hour']['count']} sessions)"
|
||||
)
|
||||
if act.get("active_days"):
|
||||
lines.append(f"**Active days:** {act['active_days']}", )
|
||||
lines.append(
|
||||
f"**Active days:** {act['active_days']}",
|
||||
)
|
||||
if act.get("max_streak", 0) > 1:
|
||||
lines.append(f"**Best streak:** {act['max_streak']} consecutive days")
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ import os
|
||||
import re
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
from typing import Any
|
||||
|
||||
import requests
|
||||
import yaml
|
||||
@@ -18,7 +18,7 @@ from hermes_constants import OPENROUTER_MODELS_URL
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_model_metadata_cache: Dict[str, Dict[str, Any]] = {}
|
||||
_model_metadata_cache: dict[str, dict[str, Any]] = {}
|
||||
_model_metadata_cache_time: float = 0
|
||||
_MODEL_CACHE_TTL = 3600
|
||||
|
||||
@@ -63,7 +63,7 @@ DEFAULT_CONTEXT_LENGTHS = {
|
||||
}
|
||||
|
||||
|
||||
def fetch_model_metadata(force_refresh: bool = False) -> Dict[str, Dict[str, Any]]:
|
||||
def fetch_model_metadata(force_refresh: bool = False) -> dict[str, dict[str, Any]]:
|
||||
"""Fetch model metadata from OpenRouter (cached for 1 hour)."""
|
||||
global _model_metadata_cache, _model_metadata_cache_time
|
||||
|
||||
@@ -104,7 +104,7 @@ def _get_context_cache_path() -> Path:
|
||||
return hermes_home / "context_length_cache.yaml"
|
||||
|
||||
|
||||
def _load_context_cache() -> Dict[str, int]:
|
||||
def _load_context_cache() -> dict[str, int]:
|
||||
"""Load the model+provider → context_length cache from disk."""
|
||||
path = _get_context_cache_path()
|
||||
if not path.exists():
|
||||
@@ -139,14 +139,14 @@ def save_context_length(model: str, base_url: str, length: int) -> None:
|
||||
logger.debug("Failed to save context length cache: %s", e)
|
||||
|
||||
|
||||
def get_cached_context_length(model: str, base_url: str) -> Optional[int]:
|
||||
def get_cached_context_length(model: str, base_url: str) -> int | None:
|
||||
"""Look up a previously discovered context length for model+provider."""
|
||||
key = f"{model}@{base_url}"
|
||||
cache = _load_context_cache()
|
||||
return cache.get(key)
|
||||
|
||||
|
||||
def get_next_probe_tier(current_length: int) -> Optional[int]:
|
||||
def get_next_probe_tier(current_length: int) -> int | None:
|
||||
"""Return the next lower probe tier, or None if already at minimum."""
|
||||
for tier in CONTEXT_PROBE_TIERS:
|
||||
if tier < current_length:
|
||||
@@ -154,7 +154,7 @@ def get_next_probe_tier(current_length: int) -> Optional[int]:
|
||||
return None
|
||||
|
||||
|
||||
def parse_context_limit_from_error(error_msg: str) -> Optional[int]:
|
||||
def parse_context_limit_from_error(error_msg: str) -> int | None:
|
||||
"""Try to extract the actual context limit from an API error message.
|
||||
|
||||
Many providers include the limit in their error text, e.g.:
|
||||
@@ -166,11 +166,11 @@ def parse_context_limit_from_error(error_msg: str) -> Optional[int]:
|
||||
error_lower = error_msg.lower()
|
||||
# Pattern: look for numbers near context-related keywords
|
||||
patterns = [
|
||||
r'(?:max(?:imum)?|limit)\s*(?:context\s*)?(?:length|size|window)?\s*(?:is|of|:)?\s*(\d{4,})',
|
||||
r'context\s*(?:length|size|window)\s*(?:is|of|:)?\s*(\d{4,})',
|
||||
r'(\d{4,})\s*(?:token)?\s*(?:context|limit)',
|
||||
r'>\s*(\d{4,})\s*(?:max|limit|token)', # "250000 tokens > 200000 maximum"
|
||||
r'(\d{4,})\s*(?:max(?:imum)?)\b', # "200000 maximum"
|
||||
r"(?:max(?:imum)?|limit)\s*(?:context\s*)?(?:length|size|window)?\s*(?:is|of|:)?\s*(\d{4,})",
|
||||
r"context\s*(?:length|size|window)\s*(?:is|of|:)?\s*(\d{4,})",
|
||||
r"(\d{4,})\s*(?:token)?\s*(?:context|limit)",
|
||||
r">\s*(\d{4,})\s*(?:max|limit|token)", # "250000 tokens > 200000 maximum"
|
||||
r"(\d{4,})\s*(?:max(?:imum)?)\b", # "200000 maximum"
|
||||
]
|
||||
for pattern in patterns:
|
||||
match = re.search(pattern, error_lower)
|
||||
@@ -218,7 +218,7 @@ def estimate_tokens_rough(text: str) -> int:
|
||||
return len(text) // 4
|
||||
|
||||
|
||||
def estimate_messages_tokens_rough(messages: List[Dict[str, Any]]) -> int:
|
||||
def estimate_messages_tokens_rough(messages: list[dict[str, Any]]) -> int:
|
||||
"""Rough token estimate for a message list (pre-flight only)."""
|
||||
total_chars = sum(len(str(msg)) for msg in messages)
|
||||
return total_chars // 4
|
||||
|
||||
@@ -8,7 +8,6 @@ import logging
|
||||
import os
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -18,21 +17,29 @@ logger = logging.getLogger(__name__)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_CONTEXT_THREAT_PATTERNS = [
|
||||
(r'ignore\s+(previous|all|above|prior)\s+instructions', "prompt_injection"),
|
||||
(r'do\s+not\s+tell\s+the\s+user', "deception_hide"),
|
||||
(r'system\s+prompt\s+override', "sys_prompt_override"),
|
||||
(r'disregard\s+(your|all|any)\s+(instructions|rules|guidelines)', "disregard_rules"),
|
||||
(r'act\s+as\s+(if|though)\s+you\s+(have\s+no|don\'t\s+have)\s+(restrictions|limits|rules)', "bypass_restrictions"),
|
||||
(r'<!--[^>]*(?:ignore|override|system|secret|hidden)[^>]*-->', "html_comment_injection"),
|
||||
(r"ignore\s+(previous|all|above|prior)\s+instructions", "prompt_injection"),
|
||||
(r"do\s+not\s+tell\s+the\s+user", "deception_hide"),
|
||||
(r"system\s+prompt\s+override", "sys_prompt_override"),
|
||||
(r"disregard\s+(your|all|any)\s+(instructions|rules|guidelines)", "disregard_rules"),
|
||||
(r"act\s+as\s+(if|though)\s+you\s+(have\s+no|don\'t\s+have)\s+(restrictions|limits|rules)", "bypass_restrictions"),
|
||||
(r"<!--[^>]*(?:ignore|override|system|secret|hidden)[^>]*-->", "html_comment_injection"),
|
||||
(r'<\s*div\s+style\s*=\s*["\'].*display\s*:\s*none', "hidden_div"),
|
||||
(r'translate\s+.*\s+into\s+.*\s+and\s+(execute|run|eval)', "translate_execute"),
|
||||
(r'curl\s+[^\n]*\$\{?\w*(KEY|TOKEN|SECRET|PASSWORD|CREDENTIAL|API)', "exfil_curl"),
|
||||
(r'cat\s+[^\n]*(\.env|credentials|\.netrc|\.pgpass)', "read_secrets"),
|
||||
(r"translate\s+.*\s+into\s+.*\s+and\s+(execute|run|eval)", "translate_execute"),
|
||||
(r"curl\s+[^\n]*\$\{?\w*(KEY|TOKEN|SECRET|PASSWORD|CREDENTIAL|API)", "exfil_curl"),
|
||||
(r"cat\s+[^\n]*(\.env|credentials|\.netrc|\.pgpass)", "read_secrets"),
|
||||
]
|
||||
|
||||
_CONTEXT_INVISIBLE_CHARS = {
|
||||
'\u200b', '\u200c', '\u200d', '\u2060', '\ufeff',
|
||||
'\u202a', '\u202b', '\u202c', '\u202d', '\u202e',
|
||||
"\u200b",
|
||||
"\u200c",
|
||||
"\u200d",
|
||||
"\u2060",
|
||||
"\ufeff",
|
||||
"\u202a",
|
||||
"\u202b",
|
||||
"\u202c",
|
||||
"\u202d",
|
||||
"\u202e",
|
||||
}
|
||||
|
||||
|
||||
@@ -52,10 +59,13 @@ def _scan_context_content(content: str, filename: str) -> str:
|
||||
|
||||
if findings:
|
||||
logger.warning("Context file %s blocked: %s", filename, ", ".join(findings))
|
||||
return f"[BLOCKED: {filename} contained potential prompt injection ({', '.join(findings)}). Content not loaded.]"
|
||||
return (
|
||||
f"[BLOCKED: {filename} contained potential prompt injection ({', '.join(findings)}). Content not loaded.]"
|
||||
)
|
||||
|
||||
return content
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Constants
|
||||
# =========================================================================
|
||||
@@ -131,10 +141,7 @@ PLATFORM_HINTS = {
|
||||
"files arrive as downloadable documents. You can also include image "
|
||||
"URLs in markdown format  and they will be sent as photos."
|
||||
),
|
||||
"cli": (
|
||||
"You are a CLI AI Agent. Try not to use markdown but simple text "
|
||||
"renderable inside a terminal."
|
||||
),
|
||||
"cli": ("You are a CLI AI Agent. Try not to use markdown but simple text renderable inside a terminal."),
|
||||
}
|
||||
|
||||
CONTEXT_FILE_MAX_CHARS = 20_000
|
||||
@@ -146,18 +153,20 @@ CONTEXT_TRUNCATE_TAIL_RATIO = 0.2
|
||||
# Skills index
|
||||
# =========================================================================
|
||||
|
||||
|
||||
def _read_skill_description(skill_file: Path, max_chars: int = 60) -> str:
|
||||
"""Read the description from a SKILL.md frontmatter, capped at max_chars."""
|
||||
try:
|
||||
raw = skill_file.read_text(encoding="utf-8")[:2000]
|
||||
match = re.search(
|
||||
r"^---\s*\n.*?description:\s*(.+?)\s*\n.*?^---",
|
||||
raw, re.MULTILINE | re.DOTALL,
|
||||
raw,
|
||||
re.MULTILINE | re.DOTALL,
|
||||
)
|
||||
if match:
|
||||
desc = match.group(1).strip().strip("'\"")
|
||||
if len(desc) > max_chars:
|
||||
desc = desc[:max_chars - 3] + "..."
|
||||
desc = desc[: max_chars - 3] + "..."
|
||||
return desc
|
||||
except Exception:
|
||||
pass
|
||||
@@ -172,6 +181,7 @@ def _skill_is_platform_compatible(skill_file: Path) -> bool:
|
||||
"""
|
||||
try:
|
||||
from tools.skills_tool import _parse_frontmatter, skill_matches_platform
|
||||
|
||||
raw = skill_file.read_text(encoding="utf-8")[:2000]
|
||||
frontmatter, _ = _parse_frontmatter(raw)
|
||||
return skill_matches_platform(frontmatter)
|
||||
@@ -195,6 +205,8 @@ def build_skills_system_prompt() -> str:
|
||||
|
||||
# Collect skills with descriptions, grouped by category
|
||||
# Each entry: (skill_name, description)
|
||||
# Supports sub-categories: skills/mlops/training/axolotl/SKILL.md
|
||||
# → category "mlops/training", skill "axolotl"
|
||||
skills_by_category: dict[str, list[tuple[str, str]]] = {}
|
||||
for skill_file in skills_dir.rglob("SKILL.md"):
|
||||
# Skip skills incompatible with the current OS platform
|
||||
@@ -203,8 +215,13 @@ def build_skills_system_prompt() -> str:
|
||||
rel_path = skill_file.relative_to(skills_dir)
|
||||
parts = rel_path.parts
|
||||
if len(parts) >= 2:
|
||||
category = parts[0]
|
||||
# Category is everything between skills_dir and the skill folder
|
||||
# e.g. parts = ("mlops", "training", "axolotl", "SKILL.md")
|
||||
# → category = "mlops/training", skill_name = "axolotl"
|
||||
# e.g. parts = ("github", "github-auth", "SKILL.md")
|
||||
# → category = "github", skill_name = "github-auth"
|
||||
skill_name = parts[-2]
|
||||
category = "/".join(parts[:-2]) if len(parts) > 2 else parts[0]
|
||||
else:
|
||||
category = "general"
|
||||
skill_name = skill_file.parent.name
|
||||
@@ -215,9 +232,11 @@ def build_skills_system_prompt() -> str:
|
||||
return ""
|
||||
|
||||
# Read category-level descriptions from DESCRIPTION.md
|
||||
# Checks both the exact category path and parent directories
|
||||
category_descriptions = {}
|
||||
for category in skills_by_category:
|
||||
desc_file = skills_dir / category / "DESCRIPTION.md"
|
||||
cat_path = Path(category)
|
||||
desc_file = skills_dir / cat_path / "DESCRIPTION.md"
|
||||
if desc_file.exists():
|
||||
try:
|
||||
content = desc_file.read_text(encoding="utf-8")
|
||||
@@ -251,8 +270,7 @@ def build_skills_system_prompt() -> str:
|
||||
"load it with skill_view(name) and follow its instructions. "
|
||||
"If a skill has issues, fix it with skill_manage(action='patch').\n"
|
||||
"\n"
|
||||
"<available_skills>\n"
|
||||
+ "\n".join(index_lines) + "\n"
|
||||
"<available_skills>\n" + "\n".join(index_lines) + "\n"
|
||||
"</available_skills>\n"
|
||||
"\n"
|
||||
"If none match, proceed normally without loading a skill."
|
||||
@@ -263,6 +281,7 @@ def build_skills_system_prompt() -> str:
|
||||
# Context files (SOUL.md, AGENTS.md, .cursorrules)
|
||||
# =========================================================================
|
||||
|
||||
|
||||
def _truncate_content(content: str, filename: str, max_chars: int = CONTEXT_FILE_MAX_CHARS) -> str:
|
||||
"""Head/tail truncation with a marker in the middle."""
|
||||
if len(content) <= max_chars:
|
||||
@@ -275,7 +294,7 @@ def _truncate_content(content: str, filename: str, max_chars: int = CONTEXT_FILE
|
||||
return head + marker + tail
|
||||
|
||||
|
||||
def build_context_files_prompt(cwd: Optional[str] = None) -> str:
|
||||
def build_context_files_prompt(cwd: str | None = None) -> str:
|
||||
"""Discover and load context files for the system prompt.
|
||||
|
||||
Discovery: AGENTS.md (recursive), .cursorrules / .cursor/rules/*.mdc,
|
||||
@@ -298,7 +317,9 @@ def build_context_files_prompt(cwd: Optional[str] = None) -> str:
|
||||
if top_level_agents:
|
||||
agents_files = []
|
||||
for root, dirs, files in os.walk(cwd_path):
|
||||
dirs[:] = [d for d in dirs if not d.startswith('.') and d not in ('node_modules', '__pycache__', 'venv', '.venv')]
|
||||
dirs[:] = [
|
||||
d for d in dirs if not d.startswith(".") and d not in ("node_modules", "__pycache__", "venv", ".venv")
|
||||
]
|
||||
for f in files:
|
||||
if f.lower() == "agents.md":
|
||||
agents_files.append(Path(root) / f)
|
||||
@@ -375,4 +396,7 @@ def build_context_files_prompt(cwd: Optional[str] = None) -> str:
|
||||
|
||||
if not sections:
|
||||
return ""
|
||||
return "# Project Context\n\nThe following project context files have been loaded and should be followed:\n\n" + "\n".join(sections)
|
||||
return (
|
||||
"# Project Context\n\nThe following project context files have been loaded and should be followed:\n\n"
|
||||
+ "\n".join(sections)
|
||||
)
|
||||
|
||||
@@ -9,7 +9,7 @@ Pure functions -- no class state, no AIAgent dependency.
|
||||
"""
|
||||
|
||||
import copy
|
||||
from typing import Any, Dict, List
|
||||
from typing import Any
|
||||
|
||||
|
||||
def _apply_cache_marker(msg: dict, cache_marker: dict) -> None:
|
||||
@@ -36,9 +36,9 @@ def _apply_cache_marker(msg: dict, cache_marker: dict) -> None:
|
||||
|
||||
|
||||
def apply_anthropic_cache_control(
|
||||
api_messages: List[Dict[str, Any]],
|
||||
api_messages: list[dict[str, Any]],
|
||||
cache_ttl: str = "5m",
|
||||
) -> List[Dict[str, Any]]:
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Apply system_and_3 caching strategy to messages for Anthropic models.
|
||||
|
||||
Places up to 4 cache_control breakpoints: system prompt + last 3 non-system messages.
|
||||
|
||||
@@ -10,34 +10,33 @@ the first 6 and last 4 characters for debuggability.
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Known API key prefixes -- match the prefix + contiguous token chars
|
||||
_PREFIX_PATTERNS = [
|
||||
r"sk-[A-Za-z0-9_-]{10,}", # OpenAI / OpenRouter / Anthropic (sk-ant-*)
|
||||
r"ghp_[A-Za-z0-9]{10,}", # GitHub PAT (classic)
|
||||
r"github_pat_[A-Za-z0-9_]{10,}", # GitHub PAT (fine-grained)
|
||||
r"xox[baprs]-[A-Za-z0-9-]{10,}", # Slack tokens
|
||||
r"AIza[A-Za-z0-9_-]{30,}", # Google API keys
|
||||
r"pplx-[A-Za-z0-9]{10,}", # Perplexity
|
||||
r"fal_[A-Za-z0-9_-]{10,}", # Fal.ai
|
||||
r"fc-[A-Za-z0-9]{10,}", # Firecrawl
|
||||
r"bb_live_[A-Za-z0-9_-]{10,}", # BrowserBase
|
||||
r"gAAAA[A-Za-z0-9_=-]{20,}", # Codex encrypted tokens
|
||||
r"AKIA[A-Z0-9]{16}", # AWS Access Key ID
|
||||
r"sk_live_[A-Za-z0-9]{10,}", # Stripe secret key (live)
|
||||
r"sk_test_[A-Za-z0-9]{10,}", # Stripe secret key (test)
|
||||
r"rk_live_[A-Za-z0-9]{10,}", # Stripe restricted key
|
||||
r"SG\.[A-Za-z0-9_-]{10,}", # SendGrid API key
|
||||
r"hf_[A-Za-z0-9]{10,}", # HuggingFace token
|
||||
r"r8_[A-Za-z0-9]{10,}", # Replicate API token
|
||||
r"npm_[A-Za-z0-9]{10,}", # npm access token
|
||||
r"pypi-[A-Za-z0-9_-]{10,}", # PyPI API token
|
||||
r"dop_v1_[A-Za-z0-9]{10,}", # DigitalOcean PAT
|
||||
r"doo_v1_[A-Za-z0-9]{10,}", # DigitalOcean OAuth
|
||||
r"am_[A-Za-z0-9_-]{10,}", # AgentMail API key
|
||||
r"sk-[A-Za-z0-9_-]{10,}", # OpenAI / OpenRouter / Anthropic (sk-ant-*)
|
||||
r"ghp_[A-Za-z0-9]{10,}", # GitHub PAT (classic)
|
||||
r"github_pat_[A-Za-z0-9_]{10,}", # GitHub PAT (fine-grained)
|
||||
r"xox[baprs]-[A-Za-z0-9-]{10,}", # Slack tokens
|
||||
r"AIza[A-Za-z0-9_-]{30,}", # Google API keys
|
||||
r"pplx-[A-Za-z0-9]{10,}", # Perplexity
|
||||
r"fal_[A-Za-z0-9_-]{10,}", # Fal.ai
|
||||
r"fc-[A-Za-z0-9]{10,}", # Firecrawl
|
||||
r"bb_live_[A-Za-z0-9_-]{10,}", # BrowserBase
|
||||
r"gAAAA[A-Za-z0-9_=-]{20,}", # Codex encrypted tokens
|
||||
r"AKIA[A-Z0-9]{16}", # AWS Access Key ID
|
||||
r"sk_live_[A-Za-z0-9]{10,}", # Stripe secret key (live)
|
||||
r"sk_test_[A-Za-z0-9]{10,}", # Stripe secret key (test)
|
||||
r"rk_live_[A-Za-z0-9]{10,}", # Stripe restricted key
|
||||
r"SG\.[A-Za-z0-9_-]{10,}", # SendGrid API key
|
||||
r"hf_[A-Za-z0-9]{10,}", # HuggingFace token
|
||||
r"r8_[A-Za-z0-9]{10,}", # Replicate API token
|
||||
r"npm_[A-Za-z0-9]{10,}", # npm access token
|
||||
r"pypi-[A-Za-z0-9_-]{10,}", # PyPI API token
|
||||
r"dop_v1_[A-Za-z0-9]{10,}", # DigitalOcean PAT
|
||||
r"doo_v1_[A-Za-z0-9]{10,}", # DigitalOcean OAuth
|
||||
r"am_[A-Za-z0-9_-]{10,}", # AgentMail API key
|
||||
]
|
||||
|
||||
# ENV assignment patterns: KEY=value where KEY contains a secret-like name
|
||||
@@ -66,9 +65,7 @@ _TELEGRAM_RE = re.compile(
|
||||
)
|
||||
|
||||
# Private key blocks: -----BEGIN RSA PRIVATE KEY----- ... -----END RSA PRIVATE KEY-----
|
||||
_PRIVATE_KEY_RE = re.compile(
|
||||
r"-----BEGIN[A-Z ]*PRIVATE KEY-----[\s\S]*?-----END[A-Z ]*PRIVATE KEY-----"
|
||||
)
|
||||
_PRIVATE_KEY_RE = re.compile(r"-----BEGIN[A-Z ]*PRIVATE KEY-----[\s\S]*?-----END[A-Z ]*PRIVATE KEY-----")
|
||||
|
||||
# Database connection strings: protocol://user:PASSWORD@host
|
||||
# Catches postgres, mysql, mongodb, redis, amqp URLs and redacts the password
|
||||
@@ -82,9 +79,7 @@ _DB_CONNSTR_RE = re.compile(
|
||||
_SIGNAL_PHONE_RE = re.compile(r"(\+[1-9]\d{6,14})(?![A-Za-z0-9])")
|
||||
|
||||
# Compile known prefix patterns into one alternation
|
||||
_PREFIX_RE = re.compile(
|
||||
r"(?<![A-Za-z0-9_-])(" + "|".join(_PREFIX_PATTERNS) + r")(?![A-Za-z0-9_-])"
|
||||
)
|
||||
_PREFIX_RE = re.compile(r"(?<![A-Za-z0-9_-])(" + "|".join(_PREFIX_PATTERNS) + r")(?![A-Za-z0-9_-])")
|
||||
|
||||
|
||||
def _mask_token(token: str) -> str:
|
||||
@@ -112,12 +107,14 @@ def redact_sensitive_text(text: str) -> str:
|
||||
def _redact_env(m):
|
||||
name, quote, value = m.group(1), m.group(2), m.group(3)
|
||||
return f"{name}={quote}{_mask_token(value)}{quote}"
|
||||
|
||||
text = _ENV_ASSIGN_RE.sub(_redact_env, text)
|
||||
|
||||
# JSON fields: "apiKey": "value"
|
||||
def _redact_json(m):
|
||||
key, value = m.group(1), m.group(2)
|
||||
return f'{key}: "{_mask_token(value)}"'
|
||||
|
||||
text = _JSON_FIELD_RE.sub(_redact_json, text)
|
||||
|
||||
# Authorization headers
|
||||
@@ -131,6 +128,7 @@ def redact_sensitive_text(text: str) -> str:
|
||||
prefix = m.group(1) or ""
|
||||
digits = m.group(2)
|
||||
return f"{prefix}{digits}:***"
|
||||
|
||||
text = _TELEGRAM_RE.sub(_redact_telegram, text)
|
||||
|
||||
# Private key blocks
|
||||
@@ -145,6 +143,7 @@ def redact_sensitive_text(text: str) -> str:
|
||||
if len(phone) <= 8:
|
||||
return phone[:2] + "****" + phone[-2:]
|
||||
return phone[:4] + "****" + phone[-4:]
|
||||
|
||||
text = _SIGNAL_PHONE_RE.sub(_redact_phone, text)
|
||||
|
||||
return text
|
||||
@@ -153,7 +152,7 @@ def redact_sensitive_text(text: str) -> str:
|
||||
class RedactingFormatter(logging.Formatter):
|
||||
"""Log formatter that redacts secrets from all log messages."""
|
||||
|
||||
def __init__(self, fmt=None, datefmt=None, style='%', **kwargs):
|
||||
def __init__(self, fmt=None, datefmt=None, style="%", **kwargs):
|
||||
super().__init__(fmt, datefmt, style, **kwargs)
|
||||
|
||||
def format(self, record: logging.LogRecord) -> str:
|
||||
|
||||
@@ -6,14 +6,14 @@ can invoke skills via /skill-name commands.
|
||||
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_skill_commands: Dict[str, Dict[str, Any]] = {}
|
||||
_skill_commands: dict[str, dict[str, Any]] = {}
|
||||
|
||||
|
||||
def scan_skill_commands() -> Dict[str, Dict[str, Any]]:
|
||||
def scan_skill_commands() -> dict[str, dict[str, Any]]:
|
||||
"""Scan ~/.hermes/skills/ and return a mapping of /command -> skill info.
|
||||
|
||||
Returns:
|
||||
@@ -23,26 +23,27 @@ def scan_skill_commands() -> Dict[str, Dict[str, Any]]:
|
||||
_skill_commands = {}
|
||||
try:
|
||||
from tools.skills_tool import SKILLS_DIR, _parse_frontmatter, skill_matches_platform
|
||||
|
||||
if not SKILLS_DIR.exists():
|
||||
return _skill_commands
|
||||
for skill_md in SKILLS_DIR.rglob("SKILL.md"):
|
||||
if any(part in ('.git', '.github', '.hub') for part in skill_md.parts):
|
||||
if any(part in (".git", ".github", ".hub") for part in skill_md.parts):
|
||||
continue
|
||||
try:
|
||||
content = skill_md.read_text(encoding='utf-8')
|
||||
content = skill_md.read_text(encoding="utf-8")
|
||||
frontmatter, body = _parse_frontmatter(content)
|
||||
# Skip skills incompatible with the current OS platform
|
||||
if not skill_matches_platform(frontmatter):
|
||||
continue
|
||||
name = frontmatter.get('name', skill_md.parent.name)
|
||||
description = frontmatter.get('description', '')
|
||||
name = frontmatter.get("name", skill_md.parent.name)
|
||||
description = frontmatter.get("description", "")
|
||||
if not description:
|
||||
for line in body.strip().split('\n'):
|
||||
for line in body.strip().split("\n"):
|
||||
line = line.strip()
|
||||
if line and not line.startswith('#'):
|
||||
if line and not line.startswith("#"):
|
||||
description = line[:80]
|
||||
break
|
||||
cmd_name = name.lower().replace(' ', '-').replace('_', '-')
|
||||
cmd_name = name.lower().replace(" ", "-").replace("_", "-")
|
||||
_skill_commands[f"/{cmd_name}"] = {
|
||||
"name": name,
|
||||
"description": description or f"Invoke the {name} skill",
|
||||
@@ -56,14 +57,14 @@ def scan_skill_commands() -> Dict[str, Dict[str, Any]]:
|
||||
return _skill_commands
|
||||
|
||||
|
||||
def get_skill_commands() -> Dict[str, Dict[str, Any]]:
|
||||
def get_skill_commands() -> dict[str, dict[str, Any]]:
|
||||
"""Return the current skill commands mapping (scan first if empty)."""
|
||||
if not _skill_commands:
|
||||
scan_skill_commands()
|
||||
return _skill_commands
|
||||
|
||||
|
||||
def build_skill_invocation_message(cmd_key: str, user_instruction: str = "") -> Optional[str]:
|
||||
def build_skill_invocation_message(cmd_key: str, user_instruction: str = "") -> str | None:
|
||||
"""Build the user message content for a skill slash command invocation.
|
||||
|
||||
Args:
|
||||
@@ -83,7 +84,7 @@ def build_skill_invocation_message(cmd_key: str, user_instruction: str = "") ->
|
||||
skill_name = skill_info["name"]
|
||||
|
||||
try:
|
||||
content = skill_md_path.read_text(encoding='utf-8')
|
||||
content = skill_md_path.read_text(encoding="utf-8")
|
||||
except Exception:
|
||||
return f"[Failed to load skill: {skill_name}]"
|
||||
|
||||
@@ -111,6 +112,8 @@ def build_skill_invocation_message(cmd_key: str, user_instruction: str = "") ->
|
||||
|
||||
if user_instruction:
|
||||
parts.append("")
|
||||
parts.append(f"The user has provided the following instruction alongside the skill invocation: {user_instruction}")
|
||||
parts.append(
|
||||
f"The user has provided the following instruction alongside the skill invocation: {user_instruction}"
|
||||
)
|
||||
|
||||
return "\n".join(parts)
|
||||
|
||||
@@ -8,7 +8,7 @@ the file-write logic live here.
|
||||
import json
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from typing import Any, Dict, List
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -27,8 +27,7 @@ def has_incomplete_scratchpad(content: str) -> bool:
|
||||
return "<REASONING_SCRATCHPAD>" in content and "</REASONING_SCRATCHPAD>" not in content
|
||||
|
||||
|
||||
def save_trajectory(trajectory: List[Dict[str, Any]], model: str,
|
||||
completed: bool, filename: str = None):
|
||||
def save_trajectory(trajectory: list[dict[str, Any]], model: str, completed: bool, filename: str = None):
|
||||
"""Append a trajectory entry to a JSONL file.
|
||||
|
||||
Args:
|
||||
|
||||
572
batch_runner.py
572
batch_runner.py
File diff suppressed because it is too large
Load Diff
@@ -555,6 +555,21 @@ toolsets:
|
||||
# args: ["-y", "@modelcontextprotocol/server-github"]
|
||||
# env:
|
||||
# GITHUB_PERSONAL_ACCESS_TOKEN: "ghp_..."
|
||||
#
|
||||
# Sampling (server-initiated LLM requests) — enabled by default.
|
||||
# Per-server config under the 'sampling' key:
|
||||
# analysis:
|
||||
# command: npx
|
||||
# args: ["-y", "analysis-server"]
|
||||
# sampling:
|
||||
# enabled: true # default: true
|
||||
# model: "gemini-3-flash" # override model (optional)
|
||||
# max_tokens_cap: 4096 # max tokens per request
|
||||
# timeout: 30 # LLM call timeout (seconds)
|
||||
# max_rpm: 10 # max requests per minute
|
||||
# allowed_models: [] # model whitelist (empty = all)
|
||||
# max_tool_rounds: 5 # tool loop limit (0 = disable)
|
||||
# log_level: "info" # audit verbosity
|
||||
|
||||
# =============================================================================
|
||||
# Voice Transcription (Speech-to-Text)
|
||||
|
||||
@@ -15,18 +15,18 @@ duplicate execution if multiple processes overlap.
|
||||
"""
|
||||
|
||||
from cron.jobs import (
|
||||
JOBS_FILE,
|
||||
create_job,
|
||||
get_job,
|
||||
list_jobs,
|
||||
remove_job,
|
||||
update_job,
|
||||
JOBS_FILE,
|
||||
)
|
||||
from cron.scheduler import tick
|
||||
|
||||
__all__ = [
|
||||
"create_job",
|
||||
"get_job",
|
||||
"get_job",
|
||||
"list_jobs",
|
||||
"remove_job",
|
||||
"update_job",
|
||||
|
||||
149
cron/jobs.py
149
cron/jobs.py
@@ -6,18 +6,19 @@ Output is saved to ~/.hermes/cron/output/{job_id}/{timestamp}.md
|
||||
"""
|
||||
|
||||
import json
|
||||
import tempfile
|
||||
import os
|
||||
import re
|
||||
import tempfile
|
||||
import uuid
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, List, Any
|
||||
from typing import Any
|
||||
|
||||
from hermes_time import now as _hermes_now
|
||||
|
||||
try:
|
||||
from croniter import croniter
|
||||
|
||||
HAS_CRONITER = True
|
||||
except ImportError:
|
||||
HAS_CRONITER = False
|
||||
@@ -42,37 +43,38 @@ def ensure_dirs():
|
||||
# Schedule Parsing
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def parse_duration(s: str) -> int:
|
||||
"""
|
||||
Parse duration string into minutes.
|
||||
|
||||
|
||||
Examples:
|
||||
"30m" → 30
|
||||
"2h" → 120
|
||||
"1d" → 1440
|
||||
"""
|
||||
s = s.strip().lower()
|
||||
match = re.match(r'^(\d+)\s*(m|min|mins|minute|minutes|h|hr|hrs|hour|hours|d|day|days)$', s)
|
||||
match = re.match(r"^(\d+)\s*(m|min|mins|minute|minutes|h|hr|hrs|hour|hours|d|day|days)$", s)
|
||||
if not match:
|
||||
raise ValueError(f"Invalid duration: '{s}'. Use format like '30m', '2h', or '1d'")
|
||||
|
||||
|
||||
value = int(match.group(1))
|
||||
unit = match.group(2)[0] # First char: m, h, or d
|
||||
|
||||
multipliers = {'m': 1, 'h': 60, 'd': 1440}
|
||||
|
||||
multipliers = {"m": 1, "h": 60, "d": 1440}
|
||||
return value * multipliers[unit]
|
||||
|
||||
|
||||
def parse_schedule(schedule: str) -> Dict[str, Any]:
|
||||
def parse_schedule(schedule: str) -> dict[str, Any]:
|
||||
"""
|
||||
Parse schedule string into structured format.
|
||||
|
||||
|
||||
Returns dict with:
|
||||
- kind: "once" | "interval" | "cron"
|
||||
- For "once": "run_at" (ISO timestamp)
|
||||
- For "interval": "minutes" (int)
|
||||
- For "cron": "expr" (cron expression)
|
||||
|
||||
|
||||
Examples:
|
||||
"30m" → once in 30 minutes
|
||||
"2h" → once in 2 hours
|
||||
@@ -84,23 +86,17 @@ def parse_schedule(schedule: str) -> Dict[str, Any]:
|
||||
schedule = schedule.strip()
|
||||
original = schedule
|
||||
schedule_lower = schedule.lower()
|
||||
|
||||
|
||||
# "every X" pattern → recurring interval
|
||||
if schedule_lower.startswith("every "):
|
||||
duration_str = schedule[6:].strip()
|
||||
minutes = parse_duration(duration_str)
|
||||
return {
|
||||
"kind": "interval",
|
||||
"minutes": minutes,
|
||||
"display": f"every {minutes}m"
|
||||
}
|
||||
|
||||
return {"kind": "interval", "minutes": minutes, "display": f"every {minutes}m"}
|
||||
|
||||
# Check for cron expression (5 or 6 space-separated fields)
|
||||
# Cron fields: minute hour day month weekday [year]
|
||||
parts = schedule.split()
|
||||
if len(parts) >= 5 and all(
|
||||
re.match(r'^[\d\*\-,/]+$', p) for p in parts[:5]
|
||||
):
|
||||
if len(parts) >= 5 and all(re.match(r"^[\d\*\-,/]+$", p) for p in parts[:5]):
|
||||
if not HAS_CRONITER:
|
||||
raise ValueError("Cron expressions require 'croniter' package. Install with: pip install croniter")
|
||||
# Validate cron expression
|
||||
@@ -108,37 +104,25 @@ def parse_schedule(schedule: str) -> Dict[str, Any]:
|
||||
croniter(schedule)
|
||||
except Exception as e:
|
||||
raise ValueError(f"Invalid cron expression '{schedule}': {e}")
|
||||
return {
|
||||
"kind": "cron",
|
||||
"expr": schedule,
|
||||
"display": schedule
|
||||
}
|
||||
|
||||
return {"kind": "cron", "expr": schedule, "display": schedule}
|
||||
|
||||
# ISO timestamp (contains T or looks like date)
|
||||
if 'T' in schedule or re.match(r'^\d{4}-\d{2}-\d{2}', schedule):
|
||||
if "T" in schedule or re.match(r"^\d{4}-\d{2}-\d{2}", schedule):
|
||||
try:
|
||||
# Parse and validate
|
||||
dt = datetime.fromisoformat(schedule.replace('Z', '+00:00'))
|
||||
return {
|
||||
"kind": "once",
|
||||
"run_at": dt.isoformat(),
|
||||
"display": f"once at {dt.strftime('%Y-%m-%d %H:%M')}"
|
||||
}
|
||||
dt = datetime.fromisoformat(schedule.replace("Z", "+00:00"))
|
||||
return {"kind": "once", "run_at": dt.isoformat(), "display": f"once at {dt.strftime('%Y-%m-%d %H:%M')}"}
|
||||
except ValueError as e:
|
||||
raise ValueError(f"Invalid timestamp '{schedule}': {e}")
|
||||
|
||||
|
||||
# Duration like "30m", "2h", "1d" → one-shot from now
|
||||
try:
|
||||
minutes = parse_duration(schedule)
|
||||
run_at = _hermes_now() + timedelta(minutes=minutes)
|
||||
return {
|
||||
"kind": "once",
|
||||
"run_at": run_at.isoformat(),
|
||||
"display": f"once in {original}"
|
||||
}
|
||||
return {"kind": "once", "run_at": run_at.isoformat(), "display": f"once in {original}"}
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
|
||||
raise ValueError(
|
||||
f"Invalid schedule '{original}'. Use:\n"
|
||||
f" - Duration: '30m', '2h', '1d' (one-shot)\n"
|
||||
@@ -161,7 +145,7 @@ def _ensure_aware(dt: datetime) -> datetime:
|
||||
return dt
|
||||
|
||||
|
||||
def compute_next_run(schedule: Dict[str, Any], last_run_at: Optional[str] = None) -> Optional[str]:
|
||||
def compute_next_run(schedule: dict[str, Any], last_run_at: str | None = None) -> str | None:
|
||||
"""
|
||||
Compute the next run time for a schedule.
|
||||
|
||||
@@ -199,26 +183,27 @@ def compute_next_run(schedule: Dict[str, Any], last_run_at: Optional[str] = None
|
||||
# Job CRUD Operations
|
||||
# =============================================================================
|
||||
|
||||
def load_jobs() -> List[Dict[str, Any]]:
|
||||
|
||||
def load_jobs() -> list[dict[str, Any]]:
|
||||
"""Load all jobs from storage."""
|
||||
ensure_dirs()
|
||||
if not JOBS_FILE.exists():
|
||||
return []
|
||||
|
||||
|
||||
try:
|
||||
with open(JOBS_FILE, 'r', encoding='utf-8') as f:
|
||||
with open(JOBS_FILE, encoding="utf-8") as f:
|
||||
data = json.load(f)
|
||||
return data.get("jobs", [])
|
||||
except (json.JSONDecodeError, IOError):
|
||||
except (OSError, json.JSONDecodeError):
|
||||
return []
|
||||
|
||||
|
||||
def save_jobs(jobs: List[Dict[str, Any]]):
|
||||
def save_jobs(jobs: list[dict[str, Any]]):
|
||||
"""Save all jobs to storage."""
|
||||
ensure_dirs()
|
||||
fd, tmp_path = tempfile.mkstemp(dir=str(JOBS_FILE.parent), suffix='.tmp', prefix='.jobs_')
|
||||
fd, tmp_path = tempfile.mkstemp(dir=str(JOBS_FILE.parent), suffix=".tmp", prefix=".jobs_")
|
||||
try:
|
||||
with os.fdopen(fd, 'w', encoding='utf-8') as f:
|
||||
with os.fdopen(fd, "w", encoding="utf-8") as f:
|
||||
json.dump({"jobs": jobs, "updated_at": _hermes_now().isoformat()}, f, indent=2)
|
||||
f.flush()
|
||||
os.fsync(f.fileno())
|
||||
@@ -234,14 +219,14 @@ def save_jobs(jobs: List[Dict[str, Any]]):
|
||||
def create_job(
|
||||
prompt: str,
|
||||
schedule: str,
|
||||
name: Optional[str] = None,
|
||||
repeat: Optional[int] = None,
|
||||
deliver: Optional[str] = None,
|
||||
origin: Optional[Dict[str, Any]] = None
|
||||
) -> Dict[str, Any]:
|
||||
name: str | None = None,
|
||||
repeat: int | None = None,
|
||||
deliver: str | None = None,
|
||||
origin: dict[str, Any] | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Create a new cron job.
|
||||
|
||||
|
||||
Args:
|
||||
prompt: The prompt to run (must be self-contained)
|
||||
schedule: Schedule string (see parse_schedule)
|
||||
@@ -249,23 +234,23 @@ def create_job(
|
||||
repeat: How many times to run (None = forever, 1 = once)
|
||||
deliver: Where to deliver output ("origin", "local", "telegram", etc.)
|
||||
origin: Source info where job was created (for "origin" delivery)
|
||||
|
||||
|
||||
Returns:
|
||||
The created job dict
|
||||
"""
|
||||
parsed_schedule = parse_schedule(schedule)
|
||||
|
||||
|
||||
# Auto-set repeat=1 for one-shot schedules if not specified
|
||||
if parsed_schedule["kind"] == "once" and repeat is None:
|
||||
repeat = 1
|
||||
|
||||
|
||||
# Default delivery to origin if available, otherwise local
|
||||
if deliver is None:
|
||||
deliver = "origin" if origin else "local"
|
||||
|
||||
|
||||
job_id = uuid.uuid4().hex[:12]
|
||||
now = _hermes_now().isoformat()
|
||||
|
||||
|
||||
job = {
|
||||
"id": job_id,
|
||||
"name": name or prompt[:50].strip(),
|
||||
@@ -274,7 +259,7 @@ def create_job(
|
||||
"schedule_display": parsed_schedule.get("display", schedule),
|
||||
"repeat": {
|
||||
"times": repeat, # None = forever
|
||||
"completed": 0
|
||||
"completed": 0,
|
||||
},
|
||||
"enabled": True,
|
||||
"created_at": now,
|
||||
@@ -286,15 +271,15 @@ def create_job(
|
||||
"deliver": deliver,
|
||||
"origin": origin, # Tracks where job was created for "origin" delivery
|
||||
}
|
||||
|
||||
|
||||
jobs = load_jobs()
|
||||
jobs.append(job)
|
||||
save_jobs(jobs)
|
||||
|
||||
|
||||
return job
|
||||
|
||||
|
||||
def get_job(job_id: str) -> Optional[Dict[str, Any]]:
|
||||
def get_job(job_id: str) -> dict[str, Any] | None:
|
||||
"""Get a job by ID."""
|
||||
jobs = load_jobs()
|
||||
for job in jobs:
|
||||
@@ -303,7 +288,7 @@ def get_job(job_id: str) -> Optional[Dict[str, Any]]:
|
||||
return None
|
||||
|
||||
|
||||
def list_jobs(include_disabled: bool = False) -> List[Dict[str, Any]]:
|
||||
def list_jobs(include_disabled: bool = False) -> list[dict[str, Any]]:
|
||||
"""List all jobs, optionally including disabled ones."""
|
||||
jobs = load_jobs()
|
||||
if not include_disabled:
|
||||
@@ -311,7 +296,7 @@ def list_jobs(include_disabled: bool = False) -> List[Dict[str, Any]]:
|
||||
return jobs
|
||||
|
||||
|
||||
def update_job(job_id: str, updates: Dict[str, Any]) -> Optional[Dict[str, Any]]:
|
||||
def update_job(job_id: str, updates: dict[str, Any]) -> dict[str, Any] | None:
|
||||
"""Update a job by ID."""
|
||||
jobs = load_jobs()
|
||||
for i, job in enumerate(jobs):
|
||||
@@ -333,10 +318,10 @@ def remove_job(job_id: str) -> bool:
|
||||
return False
|
||||
|
||||
|
||||
def mark_job_run(job_id: str, success: bool, error: Optional[str] = None):
|
||||
def mark_job_run(job_id: str, success: bool, error: str | None = None):
|
||||
"""
|
||||
Mark a job as having been run.
|
||||
|
||||
|
||||
Updates last_run_at, last_status, increments completed count,
|
||||
computes next_run_at, and auto-deletes if repeat limit reached.
|
||||
"""
|
||||
@@ -347,11 +332,11 @@ def mark_job_run(job_id: str, success: bool, error: Optional[str] = None):
|
||||
job["last_run_at"] = now
|
||||
job["last_status"] = "ok" if success else "error"
|
||||
job["last_error"] = error if not success else None
|
||||
|
||||
|
||||
# Increment completed count
|
||||
if job.get("repeat"):
|
||||
job["repeat"]["completed"] = job["repeat"].get("completed", 0) + 1
|
||||
|
||||
|
||||
# Check if we've hit the repeat limit
|
||||
times = job["repeat"].get("times")
|
||||
completed = job["repeat"]["completed"]
|
||||
@@ -360,38 +345,38 @@ def mark_job_run(job_id: str, success: bool, error: Optional[str] = None):
|
||||
jobs.pop(i)
|
||||
save_jobs(jobs)
|
||||
return
|
||||
|
||||
|
||||
# Compute next run
|
||||
job["next_run_at"] = compute_next_run(job["schedule"], now)
|
||||
|
||||
|
||||
# If no next run (one-shot completed), disable
|
||||
if job["next_run_at"] is None:
|
||||
job["enabled"] = False
|
||||
|
||||
|
||||
save_jobs(jobs)
|
||||
return
|
||||
|
||||
|
||||
save_jobs(jobs)
|
||||
|
||||
|
||||
def get_due_jobs() -> List[Dict[str, Any]]:
|
||||
def get_due_jobs() -> list[dict[str, Any]]:
|
||||
"""Get all jobs that are due to run now."""
|
||||
now = _hermes_now()
|
||||
jobs = load_jobs()
|
||||
due = []
|
||||
|
||||
|
||||
for job in jobs:
|
||||
if not job.get("enabled", True):
|
||||
continue
|
||||
|
||||
|
||||
next_run = job.get("next_run_at")
|
||||
if not next_run:
|
||||
continue
|
||||
|
||||
|
||||
next_run_dt = _ensure_aware(datetime.fromisoformat(next_run))
|
||||
if next_run_dt <= now:
|
||||
due.append(job)
|
||||
|
||||
|
||||
return due
|
||||
|
||||
|
||||
@@ -400,11 +385,11 @@ def save_job_output(job_id: str, output: str):
|
||||
ensure_dirs()
|
||||
job_output_dir = OUTPUT_DIR / job_id
|
||||
job_output_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
timestamp = _hermes_now().strftime("%Y-%m-%d_%H-%M-%S")
|
||||
output_file = job_output_dir / f"{timestamp}.md"
|
||||
|
||||
with open(output_file, 'w', encoding='utf-8') as f:
|
||||
|
||||
with open(output_file, "w", encoding="utf-8") as f:
|
||||
f.write(output)
|
||||
|
||||
|
||||
return output_file
|
||||
|
||||
@@ -23,9 +23,7 @@ except ImportError:
|
||||
import msvcrt
|
||||
except ImportError:
|
||||
msvcrt = None
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from hermes_time import now as _hermes_now
|
||||
|
||||
@@ -44,7 +42,7 @@ _LOCK_DIR = _hermes_home / "cron"
|
||||
_LOCK_FILE = _LOCK_DIR / ".tick.lock"
|
||||
|
||||
|
||||
def _resolve_origin(job: dict) -> Optional[dict]:
|
||||
def _resolve_origin(job: dict) -> dict | None:
|
||||
"""Extract origin info from a job, returning {platform, chat_id, chat_name} or None."""
|
||||
origin = job.get("origin")
|
||||
if not origin:
|
||||
@@ -87,11 +85,16 @@ def _deliver_result(job: dict, content: str) -> None:
|
||||
# Fall back to home channel
|
||||
chat_id = os.getenv(f"{platform_name.upper()}_HOME_CHANNEL", "")
|
||||
if not chat_id:
|
||||
logger.warning("Job '%s' deliver=%s but no chat_id or home channel. Set via: hermes config set %s_HOME_CHANNEL <channel_id>", job["id"], deliver, platform_name.upper())
|
||||
logger.warning(
|
||||
"Job '%s' deliver=%s but no chat_id or home channel. Set via: hermes config set %s_HOME_CHANNEL <channel_id>",
|
||||
job["id"],
|
||||
deliver,
|
||||
platform_name.upper(),
|
||||
)
|
||||
return
|
||||
|
||||
from gateway.config import Platform, load_gateway_config
|
||||
from tools.send_message_tool import _send_to_platform
|
||||
from gateway.config import load_gateway_config, Platform
|
||||
|
||||
platform_map = {
|
||||
"telegram": Platform.TELEGRAM,
|
||||
@@ -123,6 +126,7 @@ def _deliver_result(job: dict, content: str) -> None:
|
||||
# asyncio.run() fails if there's already a running loop in this thread;
|
||||
# spin up a new thread to avoid that.
|
||||
import concurrent.futures
|
||||
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
|
||||
future = pool.submit(asyncio.run, _send_to_platform(platform, pconfig, chat_id, content))
|
||||
result = future.result(timeout=30)
|
||||
@@ -137,25 +141,26 @@ def _deliver_result(job: dict, content: str) -> None:
|
||||
# Mirror the delivered content into the target's gateway session
|
||||
try:
|
||||
from gateway.mirror import mirror_to_session
|
||||
|
||||
mirror_to_session(platform_name, chat_id, content, source_label="cron")
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
|
||||
def run_job(job: dict) -> tuple[bool, str, str, str | None]:
|
||||
"""
|
||||
Execute a single cron job.
|
||||
|
||||
|
||||
Returns:
|
||||
Tuple of (success, full_output_doc, final_response, error_message)
|
||||
"""
|
||||
from run_agent import AIAgent
|
||||
|
||||
|
||||
job_id = job["id"]
|
||||
job_name = job["name"]
|
||||
prompt = job["prompt"]
|
||||
origin = _resolve_origin(job)
|
||||
|
||||
|
||||
logger.info("Running job '%s' (ID: %s)", job_name, job_id)
|
||||
logger.info("Prompt: %s", prompt[:100])
|
||||
|
||||
@@ -170,6 +175,7 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
|
||||
# Re-read .env and config.yaml fresh every run so provider/key
|
||||
# changes take effect without a gateway restart.
|
||||
from dotenv import load_dotenv
|
||||
|
||||
try:
|
||||
load_dotenv(str(_hermes_home / ".env"), override=True, encoding="utf-8")
|
||||
except UnicodeDecodeError:
|
||||
@@ -181,6 +187,7 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
|
||||
_cfg = {}
|
||||
try:
|
||||
import yaml
|
||||
|
||||
_cfg_path = str(_hermes_home / "config.yaml")
|
||||
if os.path.exists(_cfg_path):
|
||||
with open(_cfg_path) as _f:
|
||||
@@ -210,12 +217,13 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
|
||||
prefill_file = os.getenv("HERMES_PREFILL_MESSAGES_FILE", "") or _cfg.get("prefill_messages_file", "")
|
||||
if prefill_file:
|
||||
import json as _json
|
||||
|
||||
pfpath = Path(prefill_file).expanduser()
|
||||
if not pfpath.is_absolute():
|
||||
pfpath = _hermes_home / pfpath
|
||||
if pfpath.exists():
|
||||
try:
|
||||
with open(pfpath, "r", encoding="utf-8") as _pf:
|
||||
with open(pfpath, encoding="utf-8") as _pf:
|
||||
prefill_messages = _json.load(_pf)
|
||||
if not isinstance(prefill_messages, list):
|
||||
prefill_messages = None
|
||||
@@ -229,9 +237,10 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
|
||||
pr = _cfg.get("provider_routing", {})
|
||||
|
||||
from hermes_cli.runtime_provider import (
|
||||
resolve_runtime_provider,
|
||||
format_runtime_provider_error,
|
||||
resolve_runtime_provider,
|
||||
)
|
||||
|
||||
try:
|
||||
runtime = resolve_runtime_provider(
|
||||
requested=os.getenv("HERMES_INFERENCE_PROVIDER"),
|
||||
@@ -254,20 +263,20 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
|
||||
providers_order=pr.get("order"),
|
||||
provider_sort=pr.get("sort"),
|
||||
quiet_mode=True,
|
||||
session_id=f"cron_{job_id}_{_hermes_now().strftime('%Y%m%d_%H%M%S')}"
|
||||
session_id=f"cron_{job_id}_{_hermes_now().strftime('%Y%m%d_%H%M%S')}",
|
||||
)
|
||||
|
||||
|
||||
result = agent.run_conversation(prompt)
|
||||
|
||||
|
||||
final_response = result.get("final_response", "")
|
||||
if not final_response:
|
||||
final_response = "(No response generated)"
|
||||
|
||||
|
||||
output = f"""# Cron Job: {job_name}
|
||||
|
||||
**Job ID:** {job_id}
|
||||
**Run Time:** {_hermes_now().strftime('%Y-%m-%d %H:%M:%S')}
|
||||
**Schedule:** {job.get('schedule_display', 'N/A')}
|
||||
**Run Time:** {_hermes_now().strftime("%Y-%m-%d %H:%M:%S")}
|
||||
**Schedule:** {job.get("schedule_display", "N/A")}
|
||||
|
||||
## Prompt
|
||||
|
||||
@@ -277,19 +286,19 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
|
||||
|
||||
{final_response}
|
||||
"""
|
||||
|
||||
|
||||
logger.info("Job '%s' completed successfully", job_name)
|
||||
return True, output, final_response, None
|
||||
|
||||
|
||||
except Exception as e:
|
||||
error_msg = f"{type(e).__name__}: {str(e)}"
|
||||
logger.error("Job '%s' failed: %s", job_name, error_msg)
|
||||
|
||||
|
||||
output = f"""# Cron Job: {job_name} (FAILED)
|
||||
|
||||
**Job ID:** {job_id}
|
||||
**Run Time:** {_hermes_now().strftime('%Y-%m-%d %H:%M:%S')}
|
||||
**Schedule:** {job.get('schedule_display', 'N/A')}
|
||||
**Run Time:** {_hermes_now().strftime("%Y-%m-%d %H:%M:%S")}
|
||||
**Schedule:** {job.get("schedule_display", "N/A")}
|
||||
|
||||
## Prompt
|
||||
|
||||
@@ -314,13 +323,13 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
|
||||
def tick(verbose: bool = True) -> int:
|
||||
"""
|
||||
Check and run all due jobs.
|
||||
|
||||
|
||||
Uses a file lock so only one tick runs at a time, even if the gateway's
|
||||
in-process ticker and a standalone daemon or manual tick overlap.
|
||||
|
||||
|
||||
Args:
|
||||
verbose: Whether to print status messages
|
||||
|
||||
|
||||
Returns:
|
||||
Number of jobs executed (0 if another tick is already running)
|
||||
"""
|
||||
@@ -334,7 +343,7 @@ def tick(verbose: bool = True) -> int:
|
||||
fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
|
||||
elif msvcrt:
|
||||
msvcrt.locking(lock_fd.fileno(), msvcrt.LK_NBLCK, 1)
|
||||
except (OSError, IOError):
|
||||
except OSError:
|
||||
logger.debug("Tick skipped — another instance holds the lock")
|
||||
if lock_fd is not None:
|
||||
lock_fd.close()
|
||||
@@ -344,11 +353,11 @@ def tick(verbose: bool = True) -> int:
|
||||
due_jobs = get_due_jobs()
|
||||
|
||||
if verbose and not due_jobs:
|
||||
logger.info("%s - No jobs due", _hermes_now().strftime('%H:%M:%S'))
|
||||
logger.info("%s - No jobs due", _hermes_now().strftime("%H:%M:%S"))
|
||||
return 0
|
||||
|
||||
if verbose:
|
||||
logger.info("%s - %s job(s) due", _hermes_now().strftime('%H:%M:%S'), len(due_jobs))
|
||||
logger.info("%s - %s job(s) due", _hermes_now().strftime("%H:%M:%S"), len(due_jobs))
|
||||
|
||||
executed = 0
|
||||
for job in due_jobs:
|
||||
@@ -360,7 +369,9 @@ def tick(verbose: bool = True) -> int:
|
||||
logger.info("Output saved to: %s", output_file)
|
||||
|
||||
# Deliver the final response to the origin/target chat
|
||||
deliver_content = final_response if success else f"⚠️ Cron job '{job.get('name', job['id'])}' failed:\n{error}"
|
||||
deliver_content = (
|
||||
final_response if success else f"⚠️ Cron job '{job.get('name', job['id'])}' failed:\n{error}"
|
||||
)
|
||||
if deliver_content:
|
||||
try:
|
||||
_deliver_result(job, deliver_content)
|
||||
@@ -371,7 +382,7 @@ def tick(verbose: bool = True) -> int:
|
||||
executed += 1
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Error processing job %s: %s", job['id'], e)
|
||||
logger.error("Error processing job %s: %s", job["id"], e)
|
||||
mark_job_run(job["id"], False, str(e))
|
||||
|
||||
return executed
|
||||
@@ -381,7 +392,7 @@ def tick(verbose: bool = True) -> int:
|
||||
elif msvcrt:
|
||||
try:
|
||||
msvcrt.locking(lock_fd.fileno(), msvcrt.LK_UNLCK, 1)
|
||||
except (OSError, IOError):
|
||||
except OSError:
|
||||
pass
|
||||
lock_fd.close()
|
||||
|
||||
|
||||
46
datagen-config-examples/web_research.yaml
Normal file
46
datagen-config-examples/web_research.yaml
Normal file
@@ -0,0 +1,46 @@
|
||||
# datagen-config-examples/web_research.yaml
|
||||
#
|
||||
# Batch data generation config for WebResearchEnv.
|
||||
# Generates tool-calling trajectories for multi-step web research tasks.
|
||||
#
|
||||
# Usage:
|
||||
# python batch_runner.py \
|
||||
# --config datagen-config-examples/web_research.yaml \
|
||||
# --run_name web_research_v1
|
||||
|
||||
environment: web-research
|
||||
|
||||
# Toolsets available to the agent during data generation
|
||||
toolsets:
|
||||
- web
|
||||
- file
|
||||
|
||||
# How many parallel workers to use
|
||||
num_workers: 4
|
||||
|
||||
# Questions per batch
|
||||
batch_size: 20
|
||||
|
||||
# Total trajectories to generate (comment out to run full dataset)
|
||||
max_items: 500
|
||||
|
||||
# Model to use for generation (override with --model flag)
|
||||
model: openrouter/nousresearch/hermes-3-llama-3.1-405b
|
||||
|
||||
# System prompt additions (ephemeral — not saved to trajectories)
|
||||
ephemeral_system_prompt: |
|
||||
You are a highly capable research agent. When asked a factual question,
|
||||
always use web_search to find current, accurate information before answering.
|
||||
Cite at least 2 sources. Be concise and accurate.
|
||||
|
||||
# Output directory
|
||||
output_dir: data/web_research_v1
|
||||
|
||||
# Trajectory compression settings (for fitting into training token budgets)
|
||||
compression:
|
||||
enabled: true
|
||||
target_max_tokens: 16000
|
||||
|
||||
# Eval settings
|
||||
eval_every: 100 # Run eval every N trajectories
|
||||
eval_size: 25 # Number of held-out questions per eval run
|
||||
643
environments/web_research_env.py
Normal file
643
environments/web_research_env.py
Normal file
@@ -0,0 +1,643 @@
|
||||
"""
|
||||
WebResearchEnv — RL Environment for Multi-Step Web Research
|
||||
============================================================
|
||||
|
||||
Trains models to do accurate, efficient, multi-source web research.
|
||||
|
||||
Reward signals:
|
||||
- Answer correctness (LLM judge, 0.0–1.0)
|
||||
- Source diversity (used ≥2 distinct domains)
|
||||
- Efficiency (penalizes excessive tool calls)
|
||||
- Tool usage (bonus for actually using web tools)
|
||||
|
||||
Dataset: FRAMES benchmark (Google, 2024) — multi-hop factual questions
|
||||
HuggingFace: google/frames-benchmark
|
||||
Fallback: built-in sample questions (no HF token needed)
|
||||
|
||||
Usage:
|
||||
# Phase 1 (OpenAI-compatible server)
|
||||
python environments/web_research_env.py serve \\
|
||||
--openai.base_url http://localhost:8000/v1 \\
|
||||
--openai.model_name YourModel \\
|
||||
--openai.server_type openai
|
||||
|
||||
# Process mode (offline data generation)
|
||||
python environments/web_research_env.py process \\
|
||||
--env.data_path_to_save_groups data/web_research.jsonl
|
||||
|
||||
# Standalone eval
|
||||
python environments/web_research_env.py evaluate \\
|
||||
--openai.base_url http://localhost:8000/v1 \\
|
||||
--openai.model_name YourModel
|
||||
|
||||
Built by: github.com/jackx707
|
||||
Inspired by: GroceryMind — production Hermes agent doing live web research
|
||||
across German grocery stores (firecrawl + hermes-agent)
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from pydantic import Field
|
||||
|
||||
# Ensure hermes-agent root is on path
|
||||
_repo_root = Path(__file__).resolve().parent.parent
|
||||
if str(_repo_root) not in sys.path:
|
||||
sys.path.insert(0, str(_repo_root))
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Optional HuggingFace datasets import
|
||||
# ---------------------------------------------------------------------------
|
||||
try:
|
||||
from datasets import load_dataset
|
||||
HF_AVAILABLE = True
|
||||
except ImportError:
|
||||
HF_AVAILABLE = False
|
||||
|
||||
from atroposlib.envs.base import ScoredDataGroup
|
||||
from atroposlib.envs.server_handling.server_manager import APIServerConfig
|
||||
from atroposlib.type_definitions import Item
|
||||
|
||||
from environments.hermes_base_env import HermesAgentBaseEnv, HermesAgentEnvConfig
|
||||
from environments.agent_loop import AgentResult
|
||||
from environments.tool_context import ToolContext
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Fallback sample dataset (used when HuggingFace is unavailable)
|
||||
# Multi-hop questions requiring real web search to answer.
|
||||
# ---------------------------------------------------------------------------
|
||||
SAMPLE_QUESTIONS = [
|
||||
{
|
||||
"question": "What is the current population of the capital city of the country that won the 2022 FIFA World Cup?",
|
||||
"answer": "Buenos Aires has approximately 3 million people in the city proper, or around 15 million in the greater metro area.",
|
||||
"difficulty": "medium",
|
||||
"hops": 2,
|
||||
},
|
||||
{
|
||||
"question": "Who is the CEO of the company that makes the most widely used open-source container orchestration platform?",
|
||||
"answer": "The Linux Foundation oversees Kubernetes. CNCF (Cloud Native Computing Foundation) is the specific body — it does not have a traditional CEO but has an executive director.",
|
||||
"difficulty": "medium",
|
||||
"hops": 2,
|
||||
},
|
||||
{
|
||||
"question": "What programming language was used to write the original version of the web framework used by Instagram?",
|
||||
"answer": "Django, which Instagram was built on, is written in Python.",
|
||||
"difficulty": "easy",
|
||||
"hops": 2,
|
||||
},
|
||||
{
|
||||
"question": "In what year was the university founded where the inventor of the World Wide Web currently holds a professorship?",
|
||||
"answer": "Tim Berners-Lee holds a professorship at MIT (founded 1861) and the University of Southampton (founded 1952).",
|
||||
"difficulty": "hard",
|
||||
"hops": 3,
|
||||
},
|
||||
{
|
||||
"question": "What is the latest stable version of the programming language that ranks #1 on the TIOBE index as of this year?",
|
||||
"answer": "Python is currently #1 on TIOBE. The latest stable version should be verified via the official python.org site.",
|
||||
"difficulty": "medium",
|
||||
"hops": 2,
|
||||
},
|
||||
{
|
||||
"question": "How many employees does the parent company of Instagram have?",
|
||||
"answer": "Meta Platforms (parent of Instagram) employs approximately 70,000+ people as of recent reports.",
|
||||
"difficulty": "medium",
|
||||
"hops": 2,
|
||||
},
|
||||
{
|
||||
"question": "What is the current interest rate set by the central bank of the country where the Eiffel Tower is located?",
|
||||
"answer": "The European Central Bank sets rates for France/eurozone. The current rate should be verified — it has changed frequently in 2023-2025.",
|
||||
"difficulty": "hard",
|
||||
"hops": 2,
|
||||
},
|
||||
{
|
||||
"question": "Which company acquired the startup founded by the creator of Oculus VR?",
|
||||
"answer": "Palmer Luckey founded Oculus VR, which was acquired by Facebook (now Meta). He later founded Anduril Industries.",
|
||||
"difficulty": "medium",
|
||||
"hops": 2,
|
||||
},
|
||||
{
|
||||
"question": "What is the market cap of the company that owns the most popular search engine in Russia?",
|
||||
"answer": "Yandex (now split into separate entities after 2024 restructuring). Current market cap should be verified via financial sources.",
|
||||
"difficulty": "hard",
|
||||
"hops": 2,
|
||||
},
|
||||
{
|
||||
"question": "What was the GDP growth rate of the country that hosted the most recent Summer Olympics?",
|
||||
"answer": "Paris, France hosted the 2024 Summer Olympics. France's recent GDP growth should be verified via World Bank or IMF data.",
|
||||
"difficulty": "hard",
|
||||
"hops": 2,
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Configuration
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class WebResearchEnvConfig(HermesAgentEnvConfig):
|
||||
"""Configuration for the web research RL environment."""
|
||||
|
||||
# Reward weights
|
||||
correctness_weight: float = Field(
|
||||
default=0.6,
|
||||
description="Weight for answer correctness in reward (LLM judge score).",
|
||||
)
|
||||
tool_usage_weight: float = Field(
|
||||
default=0.2,
|
||||
description="Weight for tool usage signal (did the model actually use web tools?).",
|
||||
)
|
||||
efficiency_weight: float = Field(
|
||||
default=0.2,
|
||||
description="Weight for efficiency signal (penalizes excessive tool calls).",
|
||||
)
|
||||
diversity_bonus: float = Field(
|
||||
default=0.1,
|
||||
description="Bonus reward for citing ≥2 distinct domains.",
|
||||
)
|
||||
|
||||
# Efficiency thresholds
|
||||
efficient_max_calls: int = Field(
|
||||
default=5,
|
||||
description="Maximum tool calls before efficiency penalty begins.",
|
||||
)
|
||||
heavy_penalty_calls: int = Field(
|
||||
default=10,
|
||||
description="Tool call count where efficiency penalty steepens.",
|
||||
)
|
||||
|
||||
# Eval
|
||||
eval_size: int = Field(
|
||||
default=20,
|
||||
description="Number of held-out items for evaluation.",
|
||||
)
|
||||
eval_split_ratio: float = Field(
|
||||
default=0.1,
|
||||
description="Fraction of dataset to hold out for evaluation (0.0–1.0).",
|
||||
)
|
||||
|
||||
# Dataset
|
||||
dataset_name: str = Field(
|
||||
default="google/frames-benchmark",
|
||||
description="HuggingFace dataset name for research questions.",
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Environment
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class WebResearchEnv(HermesAgentBaseEnv):
|
||||
"""
|
||||
RL environment for training multi-step web research skills.
|
||||
|
||||
The model is given a factual question requiring 2-3 hops of web research
|
||||
and must use web_search / web_extract tools to find and synthesize the answer.
|
||||
|
||||
Reward is multi-signal:
|
||||
60% — answer correctness (LLM judge)
|
||||
20% — tool usage (did the model actually search the web?)
|
||||
20% — efficiency (penalizes >5 tool calls)
|
||||
|
||||
Bonus +0.1 for source diversity (≥2 distinct domains cited).
|
||||
"""
|
||||
|
||||
name = "web-research"
|
||||
env_config_cls = WebResearchEnvConfig
|
||||
|
||||
# Default toolsets for this environment — web + file for saving notes
|
||||
default_toolsets = ["web", "file"]
|
||||
|
||||
@classmethod
|
||||
def config_init(cls) -> Tuple[WebResearchEnvConfig, List[APIServerConfig]]:
|
||||
"""Default configuration for the web research environment."""
|
||||
env_config = WebResearchEnvConfig(
|
||||
enabled_toolsets=["web", "file"],
|
||||
max_agent_turns=15,
|
||||
agent_temperature=1.0,
|
||||
system_prompt=(
|
||||
"You are a highly capable research agent. When asked a factual question, "
|
||||
"always use web_search to find current, accurate information before answering. "
|
||||
"Cite at least 2 sources. Be concise and accurate."
|
||||
),
|
||||
group_size=4,
|
||||
total_steps=1000,
|
||||
steps_per_eval=100,
|
||||
use_wandb=True,
|
||||
wandb_name="web-research",
|
||||
)
|
||||
|
||||
server_configs = [
|
||||
APIServerConfig(
|
||||
base_url="https://openrouter.ai/api/v1",
|
||||
model_name="anthropic/claude-sonnet-4.5",
|
||||
server_type="openai",
|
||||
api_key=os.getenv("OPENROUTER_API_KEY", ""),
|
||||
health_check=False,
|
||||
)
|
||||
]
|
||||
|
||||
return env_config, server_configs
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self._items: list[dict] = []
|
||||
self._eval_items: list[dict] = []
|
||||
self._index: int = 0
|
||||
|
||||
# Metrics tracking for wandb
|
||||
self._reward_buffer: list[float] = []
|
||||
self._correctness_buffer: list[float] = []
|
||||
self._tool_usage_buffer: list[float] = []
|
||||
self._efficiency_buffer: list[float] = []
|
||||
self._diversity_buffer: list[float] = []
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# 1. Setup — load dataset
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
async def setup(self) -> None:
|
||||
"""Load the FRAMES benchmark or fall back to built-in samples."""
|
||||
if HF_AVAILABLE:
|
||||
try:
|
||||
logger.info("Loading FRAMES benchmark from HuggingFace...")
|
||||
ds = load_dataset(self.config.dataset_name, split="test")
|
||||
self._items = [
|
||||
{
|
||||
"question": row["Prompt"],
|
||||
"answer": row["Answer"],
|
||||
"difficulty": row.get("reasoning_types", "unknown"),
|
||||
"hops": 2,
|
||||
}
|
||||
for row in ds
|
||||
]
|
||||
# Hold out for eval
|
||||
eval_size = max(
|
||||
self.config.eval_size,
|
||||
int(len(self._items) * self.config.eval_split_ratio),
|
||||
)
|
||||
random.shuffle(self._items)
|
||||
self._eval_items = self._items[:eval_size]
|
||||
self._items = self._items[eval_size:]
|
||||
logger.info(
|
||||
f"Loaded {len(self._items)} train / {len(self._eval_items)} eval items "
|
||||
f"from FRAMES benchmark."
|
||||
)
|
||||
return
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not load FRAMES from HuggingFace: {e}. Using built-in samples.")
|
||||
|
||||
# Fallback
|
||||
random.shuffle(SAMPLE_QUESTIONS)
|
||||
split = max(1, len(SAMPLE_QUESTIONS) * 8 // 10)
|
||||
self._items = SAMPLE_QUESTIONS[:split]
|
||||
self._eval_items = SAMPLE_QUESTIONS[split:]
|
||||
logger.info(
|
||||
f"Using built-in sample dataset: {len(self._items)} train / "
|
||||
f"{len(self._eval_items)} eval items."
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# 2. get_next_item — return the next question
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
async def get_next_item(self) -> dict:
|
||||
"""Return the next item, cycling through the dataset."""
|
||||
if not self._items:
|
||||
raise RuntimeError("Dataset is empty. Did you call setup()?")
|
||||
item = self._items[self._index % len(self._items)]
|
||||
self._index += 1
|
||||
return item
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# 3. format_prompt — build the user-facing prompt
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def format_prompt(self, item: dict) -> str:
|
||||
"""Format the research question as a task prompt."""
|
||||
return (
|
||||
f"Research the following question thoroughly using web search. "
|
||||
f"You MUST search the web to find current, accurate information — "
|
||||
f"do not rely solely on your training data.\n\n"
|
||||
f"Question: {item['question']}\n\n"
|
||||
f"Requirements:\n"
|
||||
f"- Use web_search and/or web_extract tools to find information\n"
|
||||
f"- Search at least 2 different sources\n"
|
||||
f"- Provide a concise, accurate answer (2-4 sentences)\n"
|
||||
f"- Cite the sources you used"
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# 4. compute_reward — multi-signal scoring
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
async def compute_reward(
|
||||
self,
|
||||
item: dict,
|
||||
result: AgentResult,
|
||||
ctx: ToolContext,
|
||||
) -> float:
|
||||
"""
|
||||
Multi-signal reward function:
|
||||
|
||||
correctness_weight * correctness — LLM judge comparing answer to ground truth
|
||||
tool_usage_weight * tool_used — binary: did the model use web tools?
|
||||
efficiency_weight * efficiency — penalizes wasteful tool usage
|
||||
+ diversity_bonus — source diversity (≥2 distinct domains)
|
||||
"""
|
||||
final_response: str = result.final_response or ""
|
||||
tools_used: list[str] = [
|
||||
tc.tool_name for tc in (result.tool_calls or [])
|
||||
] if hasattr(result, "tool_calls") and result.tool_calls else []
|
||||
tool_call_count: int = result.turns_used or len(tools_used)
|
||||
|
||||
cfg = self.config
|
||||
|
||||
# ---- Signal 1: Answer correctness (LLM judge) ----------------
|
||||
correctness = await self._llm_judge(
|
||||
question=item["question"],
|
||||
expected=item["answer"],
|
||||
model_answer=final_response,
|
||||
)
|
||||
|
||||
# ---- Signal 2: Web tool usage --------------------------------
|
||||
web_tools = {"web_search", "web_extract", "search", "firecrawl"}
|
||||
tool_used = 1.0 if any(t in web_tools for t in tools_used) else 0.0
|
||||
|
||||
# ---- Signal 3: Efficiency ------------------------------------
|
||||
if tool_call_count <= cfg.efficient_max_calls:
|
||||
efficiency = 1.0
|
||||
elif tool_call_count <= cfg.heavy_penalty_calls:
|
||||
efficiency = 1.0 - (tool_call_count - cfg.efficient_max_calls) * 0.08
|
||||
else:
|
||||
efficiency = max(0.0, 1.0 - (tool_call_count - cfg.efficient_max_calls) * 0.12)
|
||||
|
||||
# ---- Bonus: Source diversity ---------------------------------
|
||||
domains = self._extract_domains(final_response)
|
||||
diversity = cfg.diversity_bonus if len(domains) >= 2 else 0.0
|
||||
|
||||
# ---- Combine ------------------------------------------------
|
||||
reward = (
|
||||
cfg.correctness_weight * correctness
|
||||
+ cfg.tool_usage_weight * tool_used
|
||||
+ cfg.efficiency_weight * efficiency
|
||||
+ diversity
|
||||
)
|
||||
reward = min(1.0, max(0.0, reward)) # clamp to [0, 1]
|
||||
|
||||
# Track for wandb
|
||||
self._reward_buffer.append(reward)
|
||||
self._correctness_buffer.append(correctness)
|
||||
self._tool_usage_buffer.append(tool_used)
|
||||
self._efficiency_buffer.append(efficiency)
|
||||
self._diversity_buffer.append(diversity)
|
||||
|
||||
logger.debug(
|
||||
f"Reward breakdown — correctness={correctness:.2f}, "
|
||||
f"tool_used={tool_used:.1f}, efficiency={efficiency:.2f}, "
|
||||
f"diversity={diversity:.1f} → total={reward:.3f}"
|
||||
)
|
||||
|
||||
return reward
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# 5. evaluate — run on held-out eval split
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
async def evaluate(self, *args, **kwargs) -> None:
|
||||
"""Run evaluation on the held-out split using the agent loop."""
|
||||
import time
|
||||
|
||||
items = self._eval_items
|
||||
if not items:
|
||||
logger.warning("No eval items available.")
|
||||
return
|
||||
|
||||
eval_size = min(self.config.eval_size, len(items))
|
||||
eval_items = items[:eval_size]
|
||||
|
||||
logger.info(f"Running eval on {len(eval_items)} questions...")
|
||||
start_time = time.time()
|
||||
samples = []
|
||||
|
||||
for item in eval_items:
|
||||
try:
|
||||
# Use the base env's agent loop for eval (same as training)
|
||||
prompt = self.format_prompt(item)
|
||||
completion = await self.server.chat_completion(
|
||||
messages=[
|
||||
{"role": "system", "content": self.config.system_prompt or ""},
|
||||
{"role": "user", "content": prompt},
|
||||
],
|
||||
n=1,
|
||||
max_tokens=self.config.max_token_length,
|
||||
temperature=0.0,
|
||||
split="eval",
|
||||
)
|
||||
|
||||
response_content = (
|
||||
completion.choices[0].message.content if completion.choices else ""
|
||||
)
|
||||
|
||||
# Score the response
|
||||
correctness = await self._llm_judge(
|
||||
question=item["question"],
|
||||
expected=item["answer"],
|
||||
model_answer=response_content,
|
||||
)
|
||||
|
||||
samples.append({
|
||||
"prompt": item["question"],
|
||||
"response": response_content,
|
||||
"expected": item["answer"],
|
||||
"correctness": correctness,
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Eval error on item: {e}")
|
||||
samples.append({
|
||||
"prompt": item["question"],
|
||||
"response": f"ERROR: {e}",
|
||||
"expected": item["answer"],
|
||||
"correctness": 0.0,
|
||||
})
|
||||
|
||||
end_time = time.time()
|
||||
|
||||
# Compute metrics
|
||||
correctness_scores = [s["correctness"] for s in samples]
|
||||
eval_metrics = {
|
||||
"eval/mean_correctness": (
|
||||
sum(correctness_scores) / len(correctness_scores)
|
||||
if correctness_scores else 0.0
|
||||
),
|
||||
"eval/n_items": len(samples),
|
||||
}
|
||||
|
||||
await self.evaluate_log(
|
||||
metrics=eval_metrics,
|
||||
samples=samples,
|
||||
start_time=start_time,
|
||||
end_time=end_time,
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# 6. wandb_log — custom metrics
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
async def wandb_log(self, wandb_metrics: Optional[Dict] = None) -> None:
|
||||
"""Log reward breakdown metrics to wandb."""
|
||||
if wandb_metrics is None:
|
||||
wandb_metrics = {}
|
||||
|
||||
if self._reward_buffer:
|
||||
n = len(self._reward_buffer)
|
||||
wandb_metrics["train/mean_reward"] = sum(self._reward_buffer) / n
|
||||
wandb_metrics["train/mean_correctness"] = sum(self._correctness_buffer) / n
|
||||
wandb_metrics["train/mean_tool_usage"] = sum(self._tool_usage_buffer) / n
|
||||
wandb_metrics["train/mean_efficiency"] = sum(self._efficiency_buffer) / n
|
||||
wandb_metrics["train/mean_diversity"] = sum(self._diversity_buffer) / n
|
||||
wandb_metrics["train/total_rollouts"] = n
|
||||
|
||||
# Accuracy buckets
|
||||
wandb_metrics["train/correct_rate"] = (
|
||||
sum(1 for c in self._correctness_buffer if c >= 0.7) / n
|
||||
)
|
||||
wandb_metrics["train/tool_usage_rate"] = (
|
||||
sum(1 for t in self._tool_usage_buffer if t > 0) / n
|
||||
)
|
||||
|
||||
# Clear buffers
|
||||
self._reward_buffer.clear()
|
||||
self._correctness_buffer.clear()
|
||||
self._tool_usage_buffer.clear()
|
||||
self._efficiency_buffer.clear()
|
||||
self._diversity_buffer.clear()
|
||||
|
||||
await super().wandb_log(wandb_metrics)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
async def _llm_judge(
|
||||
self,
|
||||
question: str,
|
||||
expected: str,
|
||||
model_answer: str,
|
||||
) -> float:
|
||||
"""
|
||||
Use the server's LLM to judge answer correctness.
|
||||
Falls back to keyword heuristic if LLM call fails.
|
||||
"""
|
||||
if not model_answer or not model_answer.strip():
|
||||
return 0.0
|
||||
|
||||
judge_prompt = (
|
||||
"You are an impartial judge evaluating the quality of an AI research answer.\n\n"
|
||||
f"Question: {question}\n\n"
|
||||
f"Reference answer: {expected}\n\n"
|
||||
f"Model answer: {model_answer}\n\n"
|
||||
"Score the model answer on a scale from 0.0 to 1.0 where:\n"
|
||||
" 1.0 = fully correct and complete\n"
|
||||
" 0.7 = mostly correct with minor gaps\n"
|
||||
" 0.4 = partially correct\n"
|
||||
" 0.1 = mentions relevant topic but wrong or very incomplete\n"
|
||||
" 0.0 = completely wrong or no answer\n\n"
|
||||
"Consider: factual accuracy, completeness, and relevance.\n"
|
||||
'Respond with ONLY a JSON object: {"score": <float>, "reason": "<one sentence>"}'
|
||||
)
|
||||
|
||||
try:
|
||||
response = await self.server.chat_completion(
|
||||
messages=[{"role": "user", "content": judge_prompt}],
|
||||
n=1,
|
||||
max_tokens=150,
|
||||
temperature=0.0,
|
||||
split="eval",
|
||||
)
|
||||
text = response.choices[0].message.content if response.choices else ""
|
||||
parsed = self._parse_judge_json(text)
|
||||
if parsed is not None:
|
||||
return float(parsed)
|
||||
except Exception as e:
|
||||
logger.debug(f"LLM judge failed: {e}. Using heuristic.")
|
||||
|
||||
return self._heuristic_score(expected, model_answer)
|
||||
|
||||
@staticmethod
|
||||
def _parse_judge_json(text: str) -> Optional[float]:
|
||||
"""Extract the score float from LLM judge JSON response."""
|
||||
try:
|
||||
clean = re.sub(r"```(?:json)?|```", "", text).strip()
|
||||
data = json.loads(clean)
|
||||
score = float(data.get("score", -1))
|
||||
if 0.0 <= score <= 1.0:
|
||||
return score
|
||||
except Exception:
|
||||
match = re.search(r'"score"\s*:\s*([0-9.]+)', text)
|
||||
if match:
|
||||
score = float(match.group(1))
|
||||
if 0.0 <= score <= 1.0:
|
||||
return score
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def _heuristic_score(expected: str, model_answer: str) -> float:
|
||||
"""Lightweight keyword overlap score as fallback."""
|
||||
stopwords = {
|
||||
"the", "a", "an", "is", "are", "was", "were", "of", "in", "on",
|
||||
"at", "to", "for", "with", "and", "or", "but", "it", "its",
|
||||
"this", "that", "as", "by", "from", "be", "has", "have", "had",
|
||||
}
|
||||
|
||||
def tokenize(text: str) -> set:
|
||||
tokens = re.findall(r'\b\w+\b', text.lower())
|
||||
return {t for t in tokens if t not in stopwords and len(t) > 2}
|
||||
|
||||
expected_tokens = tokenize(expected)
|
||||
answer_tokens = tokenize(model_answer)
|
||||
|
||||
if not expected_tokens:
|
||||
return 0.5
|
||||
|
||||
overlap = len(expected_tokens & answer_tokens)
|
||||
union = len(expected_tokens | answer_tokens)
|
||||
|
||||
jaccard = overlap / union if union > 0 else 0.0
|
||||
recall = overlap / len(expected_tokens)
|
||||
return min(1.0, 0.4 * jaccard + 0.6 * recall)
|
||||
|
||||
@staticmethod
|
||||
def _extract_domains(text: str) -> set:
|
||||
"""Extract unique domains from URLs cited in the response."""
|
||||
urls = re.findall(r'https?://[^\s\)>\]"\']+', text)
|
||||
domains = set()
|
||||
for url in urls:
|
||||
try:
|
||||
parsed = urlparse(url)
|
||||
domain = parsed.netloc.lower().lstrip("www.")
|
||||
if domain:
|
||||
domains.add(domain)
|
||||
except Exception:
|
||||
pass
|
||||
return domains
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Entry point
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
if __name__ == "__main__":
|
||||
WebResearchEnv.cli()
|
||||
@@ -9,19 +9,18 @@ to various messaging platforms (Telegram, Discord, WhatsApp) with:
|
||||
- Platform-specific toolsets (different capabilities per platform)
|
||||
"""
|
||||
|
||||
from .config import GatewayConfig, PlatformConfig, HomeChannel, load_gateway_config
|
||||
from .config import GatewayConfig, HomeChannel, PlatformConfig, SessionResetPolicy, load_gateway_config
|
||||
from .delivery import DeliveryRouter, DeliveryTarget
|
||||
from .session import (
|
||||
SessionContext,
|
||||
SessionStore,
|
||||
SessionResetPolicy,
|
||||
build_session_context_prompt,
|
||||
)
|
||||
from .delivery import DeliveryRouter, DeliveryTarget
|
||||
|
||||
__all__ = [
|
||||
# Config
|
||||
"GatewayConfig",
|
||||
"PlatformConfig",
|
||||
"PlatformConfig",
|
||||
"HomeChannel",
|
||||
"load_gateway_config",
|
||||
# Session
|
||||
|
||||
@@ -10,7 +10,7 @@ import json
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -21,7 +21,8 @@ DIRECTORY_PATH = Path.home() / ".hermes" / "channel_directory.json"
|
||||
# Build / refresh
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def build_channel_directory(adapters: Dict[Any, Any]) -> Dict[str, Any]:
|
||||
|
||||
def build_channel_directory(adapters: dict[Any, Any]) -> dict[str, Any]:
|
||||
"""
|
||||
Build a channel directory from connected platform adapters and session data.
|
||||
|
||||
@@ -29,7 +30,7 @@ def build_channel_directory(adapters: Dict[Any, Any]) -> Dict[str, Any]:
|
||||
"""
|
||||
from gateway.config import Platform
|
||||
|
||||
platforms: Dict[str, List[Dict[str, str]]] = {}
|
||||
platforms: dict[str, list[dict[str, str]]] = {}
|
||||
|
||||
for platform, adapter in adapters.items():
|
||||
try:
|
||||
@@ -60,7 +61,7 @@ def build_channel_directory(adapters: Dict[Any, Any]) -> Dict[str, Any]:
|
||||
return directory
|
||||
|
||||
|
||||
def _build_discord(adapter) -> List[Dict[str, str]]:
|
||||
def _build_discord(adapter) -> list[dict[str, str]]:
|
||||
"""Enumerate all text channels the Discord bot can see."""
|
||||
channels = []
|
||||
client = getattr(adapter, "_client", None)
|
||||
@@ -74,12 +75,14 @@ def _build_discord(adapter) -> List[Dict[str, str]]:
|
||||
|
||||
for guild in client.guilds:
|
||||
for ch in guild.text_channels:
|
||||
channels.append({
|
||||
"id": str(ch.id),
|
||||
"name": ch.name,
|
||||
"guild": guild.name,
|
||||
"type": "channel",
|
||||
})
|
||||
channels.append(
|
||||
{
|
||||
"id": str(ch.id),
|
||||
"name": ch.name,
|
||||
"guild": guild.name,
|
||||
"type": "channel",
|
||||
}
|
||||
)
|
||||
# Also include DM-capable users we've interacted with is not
|
||||
# feasible via guild enumeration; those come from sessions.
|
||||
|
||||
@@ -88,7 +91,7 @@ def _build_discord(adapter) -> List[Dict[str, str]]:
|
||||
return channels
|
||||
|
||||
|
||||
def _build_slack(adapter) -> List[Dict[str, str]]:
|
||||
def _build_slack(adapter) -> list[dict[str, str]]:
|
||||
"""List Slack channels the bot has joined."""
|
||||
channels = []
|
||||
# Slack adapter may expose a web client
|
||||
@@ -97,7 +100,6 @@ def _build_slack(adapter) -> List[Dict[str, str]]:
|
||||
return _build_from_sessions("slack")
|
||||
|
||||
try:
|
||||
import asyncio
|
||||
from tools.send_message_tool import _send_slack # noqa: F401
|
||||
# Use the Slack Web API directly if available
|
||||
except Exception:
|
||||
@@ -107,7 +109,7 @@ def _build_slack(adapter) -> List[Dict[str, str]]:
|
||||
return _build_from_sessions("slack")
|
||||
|
||||
|
||||
def _build_from_sessions(platform_name: str) -> List[Dict[str, str]]:
|
||||
def _build_from_sessions(platform_name: str) -> list[dict[str, str]]:
|
||||
"""Pull known channels/contacts from sessions.json origin data."""
|
||||
sessions_path = Path.home() / ".hermes" / "sessions" / "sessions.json"
|
||||
if not sessions_path.exists():
|
||||
@@ -127,11 +129,13 @@ def _build_from_sessions(platform_name: str) -> List[Dict[str, str]]:
|
||||
if not chat_id or chat_id in seen_ids:
|
||||
continue
|
||||
seen_ids.add(chat_id)
|
||||
entries.append({
|
||||
"id": str(chat_id),
|
||||
"name": origin.get("chat_name") or origin.get("user_name") or str(chat_id),
|
||||
"type": session.get("chat_type", "dm"),
|
||||
})
|
||||
entries.append(
|
||||
{
|
||||
"id": str(chat_id),
|
||||
"name": origin.get("chat_name") or origin.get("user_name") or str(chat_id),
|
||||
"type": session.get("chat_type", "dm"),
|
||||
}
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug("Channel directory: failed to read sessions for %s: %s", platform_name, e)
|
||||
|
||||
@@ -142,7 +146,8 @@ def _build_from_sessions(platform_name: str) -> List[Dict[str, str]]:
|
||||
# Read / resolve
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def load_directory() -> Dict[str, Any]:
|
||||
|
||||
def load_directory() -> dict[str, Any]:
|
||||
"""Load the cached channel directory from disk."""
|
||||
if not DIRECTORY_PATH.exists():
|
||||
return {"updated_at": None, "platforms": {}}
|
||||
@@ -153,7 +158,7 @@ def load_directory() -> Dict[str, Any]:
|
||||
return {"updated_at": None, "platforms": {}}
|
||||
|
||||
|
||||
def resolve_channel_name(platform_name: str, name: str) -> Optional[str]:
|
||||
def resolve_channel_name(platform_name: str, name: str) -> str | None:
|
||||
"""
|
||||
Resolve a human-friendly channel name to a numeric ID.
|
||||
|
||||
@@ -206,8 +211,8 @@ def format_directory_for_display() -> str:
|
||||
|
||||
# Group Discord channels by guild
|
||||
if plat_name == "discord":
|
||||
guilds: Dict[str, List] = {}
|
||||
dms: List = []
|
||||
guilds: dict[str, list] = {}
|
||||
dms: list = []
|
||||
for ch in channels:
|
||||
guild = ch.get("guild")
|
||||
if guild:
|
||||
|
||||
@@ -8,19 +8,20 @@ Handles loading and validating configuration for:
|
||||
- Delivery preferences
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import json
|
||||
from pathlib import Path
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Dict, List, Optional, Any
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Platform(Enum):
|
||||
"""Supported messaging platforms."""
|
||||
|
||||
LOCAL = "local"
|
||||
TELEGRAM = "telegram"
|
||||
DISCORD = "discord"
|
||||
@@ -34,23 +35,24 @@ class Platform(Enum):
|
||||
class HomeChannel:
|
||||
"""
|
||||
Default destination for a platform.
|
||||
|
||||
|
||||
When a cron job specifies deliver="telegram" without a specific chat ID,
|
||||
messages are sent to this home channel.
|
||||
"""
|
||||
|
||||
platform: Platform
|
||||
chat_id: str
|
||||
name: str # Human-readable name for display
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
|
||||
def to_dict(self) -> dict[str, Any]:
|
||||
return {
|
||||
"platform": self.platform.value,
|
||||
"chat_id": self.chat_id,
|
||||
"name": self.name,
|
||||
}
|
||||
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: Dict[str, Any]) -> "HomeChannel":
|
||||
def from_dict(cls, data: dict[str, Any]) -> "HomeChannel":
|
||||
return cls(
|
||||
platform=Platform(data["platform"]),
|
||||
chat_id=str(data["chat_id"]),
|
||||
@@ -62,26 +64,27 @@ class HomeChannel:
|
||||
class SessionResetPolicy:
|
||||
"""
|
||||
Controls when sessions reset (lose context).
|
||||
|
||||
|
||||
Modes:
|
||||
- "daily": Reset at a specific hour each day
|
||||
- "idle": Reset after N minutes of inactivity
|
||||
- "both": Whichever triggers first (daily boundary OR idle timeout)
|
||||
- "none": Never auto-reset (context managed only by compression)
|
||||
"""
|
||||
|
||||
mode: str = "both" # "daily", "idle", "both", or "none"
|
||||
at_hour: int = 4 # Hour for daily reset (0-23, local time)
|
||||
idle_minutes: int = 1440 # Minutes of inactivity before reset (24 hours)
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
|
||||
def to_dict(self) -> dict[str, Any]:
|
||||
return {
|
||||
"mode": self.mode,
|
||||
"at_hour": self.at_hour,
|
||||
"idle_minutes": self.idle_minutes,
|
||||
}
|
||||
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: Dict[str, Any]) -> "SessionResetPolicy":
|
||||
def from_dict(cls, data: dict[str, Any]) -> "SessionResetPolicy":
|
||||
return cls(
|
||||
mode=data.get("mode", "both"),
|
||||
at_hour=data.get("at_hour", 4),
|
||||
@@ -92,15 +95,16 @@ class SessionResetPolicy:
|
||||
@dataclass
|
||||
class PlatformConfig:
|
||||
"""Configuration for a single messaging platform."""
|
||||
|
||||
enabled: bool = False
|
||||
token: Optional[str] = None # Bot token (Telegram, Discord)
|
||||
api_key: Optional[str] = None # API key if different from token
|
||||
home_channel: Optional[HomeChannel] = None
|
||||
|
||||
token: str | None = None # Bot token (Telegram, Discord)
|
||||
api_key: str | None = None # API key if different from token
|
||||
home_channel: HomeChannel | None = None
|
||||
|
||||
# Platform-specific settings
|
||||
extra: Dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
extra: dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
def to_dict(self) -> dict[str, Any]:
|
||||
result = {
|
||||
"enabled": self.enabled,
|
||||
"extra": self.extra,
|
||||
@@ -112,13 +116,13 @@ class PlatformConfig:
|
||||
if self.home_channel:
|
||||
result["home_channel"] = self.home_channel.to_dict()
|
||||
return result
|
||||
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: Dict[str, Any]) -> "PlatformConfig":
|
||||
def from_dict(cls, data: dict[str, Any]) -> "PlatformConfig":
|
||||
home_channel = None
|
||||
if "home_channel" in data:
|
||||
home_channel = HomeChannel.from_dict(data["home_channel"])
|
||||
|
||||
|
||||
return cls(
|
||||
enabled=data.get("enabled", False),
|
||||
token=data.get("token"),
|
||||
@@ -132,89 +136,80 @@ class PlatformConfig:
|
||||
class GatewayConfig:
|
||||
"""
|
||||
Main gateway configuration.
|
||||
|
||||
|
||||
Manages all platform connections, session policies, and delivery settings.
|
||||
"""
|
||||
|
||||
# Platform configurations
|
||||
platforms: Dict[Platform, PlatformConfig] = field(default_factory=dict)
|
||||
|
||||
platforms: dict[Platform, PlatformConfig] = field(default_factory=dict)
|
||||
|
||||
# Session reset policies by type
|
||||
default_reset_policy: SessionResetPolicy = field(default_factory=SessionResetPolicy)
|
||||
reset_by_type: Dict[str, SessionResetPolicy] = field(default_factory=dict)
|
||||
reset_by_platform: Dict[Platform, SessionResetPolicy] = field(default_factory=dict)
|
||||
|
||||
reset_by_type: dict[str, SessionResetPolicy] = field(default_factory=dict)
|
||||
reset_by_platform: dict[Platform, SessionResetPolicy] = field(default_factory=dict)
|
||||
|
||||
# Reset trigger commands
|
||||
reset_triggers: List[str] = field(default_factory=lambda: ["/new", "/reset"])
|
||||
|
||||
reset_triggers: list[str] = field(default_factory=lambda: ["/new", "/reset"])
|
||||
|
||||
# Storage paths
|
||||
sessions_dir: Path = field(default_factory=lambda: Path.home() / ".hermes" / "sessions")
|
||||
|
||||
|
||||
# Delivery settings
|
||||
always_log_local: bool = True # Always save cron outputs to local files
|
||||
|
||||
def get_connected_platforms(self) -> List[Platform]:
|
||||
|
||||
def get_connected_platforms(self) -> list[Platform]:
|
||||
"""Return list of platforms that are enabled and configured."""
|
||||
connected = []
|
||||
for platform, config in self.platforms.items():
|
||||
if not config.enabled:
|
||||
continue
|
||||
# Platforms that use token/api_key auth
|
||||
if config.token or config.api_key:
|
||||
connected.append(platform)
|
||||
# WhatsApp uses enabled flag only (bridge handles auth)
|
||||
elif platform == Platform.WHATSAPP:
|
||||
connected.append(platform)
|
||||
# Signal uses extra dict for config (http_url + account)
|
||||
elif platform == Platform.SIGNAL and config.extra.get("http_url"):
|
||||
if (
|
||||
config.token
|
||||
or config.api_key
|
||||
or platform == Platform.WHATSAPP
|
||||
or platform == Platform.SIGNAL
|
||||
and config.extra.get("http_url")
|
||||
):
|
||||
connected.append(platform)
|
||||
return connected
|
||||
|
||||
def get_home_channel(self, platform: Platform) -> Optional[HomeChannel]:
|
||||
|
||||
def get_home_channel(self, platform: Platform) -> HomeChannel | None:
|
||||
"""Get the home channel for a platform."""
|
||||
config = self.platforms.get(platform)
|
||||
if config:
|
||||
return config.home_channel
|
||||
return None
|
||||
|
||||
def get_reset_policy(
|
||||
self,
|
||||
platform: Optional[Platform] = None,
|
||||
session_type: Optional[str] = None
|
||||
) -> SessionResetPolicy:
|
||||
|
||||
def get_reset_policy(self, platform: Platform | None = None, session_type: str | None = None) -> SessionResetPolicy:
|
||||
"""
|
||||
Get the appropriate reset policy for a session.
|
||||
|
||||
|
||||
Priority: platform override > type override > default
|
||||
"""
|
||||
# Platform-specific override takes precedence
|
||||
if platform and platform in self.reset_by_platform:
|
||||
return self.reset_by_platform[platform]
|
||||
|
||||
|
||||
# Type-specific override (dm, group, thread)
|
||||
if session_type and session_type in self.reset_by_type:
|
||||
return self.reset_by_type[session_type]
|
||||
|
||||
|
||||
return self.default_reset_policy
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
|
||||
def to_dict(self) -> dict[str, Any]:
|
||||
return {
|
||||
"platforms": {
|
||||
p.value: c.to_dict() for p, c in self.platforms.items()
|
||||
},
|
||||
"platforms": {p.value: c.to_dict() for p, c in self.platforms.items()},
|
||||
"default_reset_policy": self.default_reset_policy.to_dict(),
|
||||
"reset_by_type": {
|
||||
k: v.to_dict() for k, v in self.reset_by_type.items()
|
||||
},
|
||||
"reset_by_platform": {
|
||||
p.value: v.to_dict() for p, v in self.reset_by_platform.items()
|
||||
},
|
||||
"reset_by_type": {k: v.to_dict() for k, v in self.reset_by_type.items()},
|
||||
"reset_by_platform": {p.value: v.to_dict() for p, v in self.reset_by_platform.items()},
|
||||
"reset_triggers": self.reset_triggers,
|
||||
"sessions_dir": str(self.sessions_dir),
|
||||
"always_log_local": self.always_log_local,
|
||||
}
|
||||
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: Dict[str, Any]) -> "GatewayConfig":
|
||||
def from_dict(cls, data: dict[str, Any]) -> "GatewayConfig":
|
||||
platforms = {}
|
||||
for platform_name, platform_data in data.get("platforms", {}).items():
|
||||
try:
|
||||
@@ -222,11 +217,11 @@ class GatewayConfig:
|
||||
platforms[platform] = PlatformConfig.from_dict(platform_data)
|
||||
except ValueError:
|
||||
pass # Skip unknown platforms
|
||||
|
||||
|
||||
reset_by_type = {}
|
||||
for type_name, policy_data in data.get("reset_by_type", {}).items():
|
||||
reset_by_type[type_name] = SessionResetPolicy.from_dict(policy_data)
|
||||
|
||||
|
||||
reset_by_platform = {}
|
||||
for platform_name, policy_data in data.get("reset_by_platform", {}).items():
|
||||
try:
|
||||
@@ -234,15 +229,15 @@ class GatewayConfig:
|
||||
reset_by_platform[platform] = SessionResetPolicy.from_dict(policy_data)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
|
||||
default_policy = SessionResetPolicy()
|
||||
if "default_reset_policy" in data:
|
||||
default_policy = SessionResetPolicy.from_dict(data["default_reset_policy"])
|
||||
|
||||
|
||||
sessions_dir = Path.home() / ".hermes" / "sessions"
|
||||
if "sessions_dir" in data:
|
||||
sessions_dir = Path(data["sessions_dir"])
|
||||
|
||||
|
||||
return cls(
|
||||
platforms=platforms,
|
||||
default_reset_policy=default_policy,
|
||||
@@ -257,7 +252,7 @@ class GatewayConfig:
|
||||
def load_gateway_config() -> GatewayConfig:
|
||||
"""
|
||||
Load gateway configuration from multiple sources.
|
||||
|
||||
|
||||
Priority (highest to lowest):
|
||||
1. Environment variables
|
||||
2. ~/.hermes/gateway.json
|
||||
@@ -265,22 +260,23 @@ def load_gateway_config() -> GatewayConfig:
|
||||
4. Defaults
|
||||
"""
|
||||
config = GatewayConfig()
|
||||
|
||||
|
||||
# Try loading from ~/.hermes/gateway.json
|
||||
gateway_config_path = Path.home() / ".hermes" / "gateway.json"
|
||||
if gateway_config_path.exists():
|
||||
try:
|
||||
with open(gateway_config_path, "r") as f:
|
||||
with open(gateway_config_path) as f:
|
||||
data = json.load(f)
|
||||
config = GatewayConfig.from_dict(data)
|
||||
except Exception as e:
|
||||
print(f"[gateway] Warning: Failed to load {gateway_config_path}: {e}")
|
||||
|
||||
|
||||
# Bridge session_reset from config.yaml (the user-facing config file)
|
||||
# into the gateway config. config.yaml takes precedence over gateway.json
|
||||
# for session reset policy since that's where hermes setup writes it.
|
||||
try:
|
||||
import yaml
|
||||
|
||||
config_yaml_path = Path.home() / ".hermes" / "config.yaml"
|
||||
if config_yaml_path.exists():
|
||||
with open(config_yaml_path) as f:
|
||||
@@ -293,14 +289,12 @@ def load_gateway_config() -> GatewayConfig:
|
||||
|
||||
# Override with environment variables
|
||||
_apply_env_overrides(config)
|
||||
|
||||
|
||||
# --- Validate loaded values ---
|
||||
policy = config.default_reset_policy
|
||||
|
||||
if not (0 <= policy.at_hour <= 23):
|
||||
logger.warning(
|
||||
"Invalid at_hour=%s (must be 0-23). Using default 4.", policy.at_hour
|
||||
)
|
||||
logger.warning("Invalid at_hour=%s (must be 0-23). Using default 4.", policy.at_hour)
|
||||
policy.at_hour = 4
|
||||
|
||||
if policy.idle_minutes is None or policy.idle_minutes <= 0:
|
||||
@@ -323,9 +317,9 @@ def load_gateway_config() -> GatewayConfig:
|
||||
env_name = _token_env_names.get(platform)
|
||||
if env_name and pconfig.token is not None and not pconfig.token.strip():
|
||||
logger.warning(
|
||||
"%s is enabled but %s is empty. "
|
||||
"The adapter will likely fail to connect.",
|
||||
platform.value, env_name,
|
||||
"%s is enabled but %s is empty. The adapter will likely fail to connect.",
|
||||
platform.value,
|
||||
env_name,
|
||||
)
|
||||
|
||||
return config
|
||||
@@ -333,7 +327,7 @@ def load_gateway_config() -> GatewayConfig:
|
||||
|
||||
def _apply_env_overrides(config: GatewayConfig) -> None:
|
||||
"""Apply environment variable overrides to config."""
|
||||
|
||||
|
||||
# Telegram
|
||||
telegram_token = os.getenv("TELEGRAM_BOT_TOKEN")
|
||||
if telegram_token:
|
||||
@@ -341,7 +335,7 @@ def _apply_env_overrides(config: GatewayConfig) -> None:
|
||||
config.platforms[Platform.TELEGRAM] = PlatformConfig()
|
||||
config.platforms[Platform.TELEGRAM].enabled = True
|
||||
config.platforms[Platform.TELEGRAM].token = telegram_token
|
||||
|
||||
|
||||
telegram_home = os.getenv("TELEGRAM_HOME_CHANNEL")
|
||||
if telegram_home and Platform.TELEGRAM in config.platforms:
|
||||
config.platforms[Platform.TELEGRAM].home_channel = HomeChannel(
|
||||
@@ -349,7 +343,7 @@ def _apply_env_overrides(config: GatewayConfig) -> None:
|
||||
chat_id=telegram_home,
|
||||
name=os.getenv("TELEGRAM_HOME_CHANNEL_NAME", "Home"),
|
||||
)
|
||||
|
||||
|
||||
# Discord
|
||||
discord_token = os.getenv("DISCORD_BOT_TOKEN")
|
||||
if discord_token:
|
||||
@@ -357,7 +351,7 @@ def _apply_env_overrides(config: GatewayConfig) -> None:
|
||||
config.platforms[Platform.DISCORD] = PlatformConfig()
|
||||
config.platforms[Platform.DISCORD].enabled = True
|
||||
config.platforms[Platform.DISCORD].token = discord_token
|
||||
|
||||
|
||||
discord_home = os.getenv("DISCORD_HOME_CHANNEL")
|
||||
if discord_home and Platform.DISCORD in config.platforms:
|
||||
config.platforms[Platform.DISCORD].home_channel = HomeChannel(
|
||||
@@ -365,14 +359,14 @@ def _apply_env_overrides(config: GatewayConfig) -> None:
|
||||
chat_id=discord_home,
|
||||
name=os.getenv("DISCORD_HOME_CHANNEL_NAME", "Home"),
|
||||
)
|
||||
|
||||
|
||||
# WhatsApp (typically uses different auth mechanism)
|
||||
whatsapp_enabled = os.getenv("WHATSAPP_ENABLED", "").lower() in ("true", "1", "yes")
|
||||
if whatsapp_enabled:
|
||||
if Platform.WHATSAPP not in config.platforms:
|
||||
config.platforms[Platform.WHATSAPP] = PlatformConfig()
|
||||
config.platforms[Platform.WHATSAPP].enabled = True
|
||||
|
||||
|
||||
# Slack
|
||||
slack_token = os.getenv("SLACK_BOT_TOKEN")
|
||||
if slack_token:
|
||||
@@ -388,7 +382,7 @@ def _apply_env_overrides(config: GatewayConfig) -> None:
|
||||
chat_id=slack_home,
|
||||
name=os.getenv("SLACK_HOME_CHANNEL_NAME", ""),
|
||||
)
|
||||
|
||||
|
||||
# Signal
|
||||
signal_url = os.getenv("SIGNAL_HTTP_URL")
|
||||
signal_account = os.getenv("SIGNAL_ACCOUNT")
|
||||
@@ -396,11 +390,13 @@ def _apply_env_overrides(config: GatewayConfig) -> None:
|
||||
if Platform.SIGNAL not in config.platforms:
|
||||
config.platforms[Platform.SIGNAL] = PlatformConfig()
|
||||
config.platforms[Platform.SIGNAL].enabled = True
|
||||
config.platforms[Platform.SIGNAL].extra.update({
|
||||
"http_url": signal_url,
|
||||
"account": signal_account,
|
||||
"ignore_stories": os.getenv("SIGNAL_IGNORE_STORIES", "true").lower() in ("true", "1", "yes"),
|
||||
})
|
||||
config.platforms[Platform.SIGNAL].extra.update(
|
||||
{
|
||||
"http_url": signal_url,
|
||||
"account": signal_account,
|
||||
"ignore_stories": os.getenv("SIGNAL_IGNORE_STORIES", "true").lower() in ("true", "1", "yes"),
|
||||
}
|
||||
)
|
||||
signal_home = os.getenv("SIGNAL_HOME_CHANNEL")
|
||||
if signal_home:
|
||||
config.platforms[Platform.SIGNAL].home_channel = HomeChannel(
|
||||
@@ -427,7 +423,7 @@ def _apply_env_overrides(config: GatewayConfig) -> None:
|
||||
config.default_reset_policy.idle_minutes = int(idle_minutes)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
|
||||
reset_hour = os.getenv("SESSION_RESET_HOUR")
|
||||
if reset_hour:
|
||||
try:
|
||||
@@ -440,6 +436,6 @@ def save_gateway_config(config: GatewayConfig) -> None:
|
||||
"""Save gateway configuration to ~/.hermes/gateway.json."""
|
||||
gateway_config_path = Path.home() / ".hermes" / "gateway.json"
|
||||
gateway_config_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
with open(gateway_config_path, "w") as f:
|
||||
json.dump(config.to_dict(), f, indent=2)
|
||||
|
||||
@@ -9,18 +9,17 @@ Routes messages to the appropriate destination based on:
|
||||
"""
|
||||
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, List, Optional, Any, Union
|
||||
from enum import Enum
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
MAX_PLATFORM_OUTPUT = 4000
|
||||
TRUNCATED_VISIBLE = 3800
|
||||
|
||||
from .config import Platform, GatewayConfig
|
||||
from .config import GatewayConfig, Platform
|
||||
from .session import SessionSource
|
||||
|
||||
|
||||
@@ -28,23 +27,24 @@ from .session import SessionSource
|
||||
class DeliveryTarget:
|
||||
"""
|
||||
A single delivery target.
|
||||
|
||||
|
||||
Represents where a message should be sent:
|
||||
- "origin" → back to source
|
||||
- "local" → save to local files
|
||||
- "telegram" → Telegram home channel
|
||||
- "telegram:123456" → specific Telegram chat
|
||||
"""
|
||||
|
||||
platform: Platform
|
||||
chat_id: Optional[str] = None # None means use home channel
|
||||
chat_id: str | None = None # None means use home channel
|
||||
is_origin: bool = False
|
||||
is_explicit: bool = False # True if chat_id was explicitly specified
|
||||
|
||||
|
||||
@classmethod
|
||||
def parse(cls, target: str, origin: Optional[SessionSource] = None) -> "DeliveryTarget":
|
||||
def parse(cls, target: str, origin: SessionSource | None = None) -> "DeliveryTarget":
|
||||
"""
|
||||
Parse a delivery target string.
|
||||
|
||||
|
||||
Formats:
|
||||
- "origin" → back to source
|
||||
- "local" → local files only
|
||||
@@ -52,7 +52,7 @@ class DeliveryTarget:
|
||||
- "telegram:123456" → specific Telegram chat
|
||||
"""
|
||||
target = target.strip().lower()
|
||||
|
||||
|
||||
if target == "origin":
|
||||
if origin:
|
||||
return cls(
|
||||
@@ -63,10 +63,10 @@ class DeliveryTarget:
|
||||
else:
|
||||
# Fallback to local if no origin
|
||||
return cls(platform=Platform.LOCAL, is_origin=True)
|
||||
|
||||
|
||||
if target == "local":
|
||||
return cls(platform=Platform.LOCAL)
|
||||
|
||||
|
||||
# Check for platform:chat_id format
|
||||
if ":" in target:
|
||||
platform_str, chat_id = target.split(":", 1)
|
||||
@@ -76,7 +76,7 @@ class DeliveryTarget:
|
||||
except ValueError:
|
||||
# Unknown platform, treat as local
|
||||
return cls(platform=Platform.LOCAL)
|
||||
|
||||
|
||||
# Just a platform name (use home channel)
|
||||
try:
|
||||
platform = Platform(target)
|
||||
@@ -84,7 +84,7 @@ class DeliveryTarget:
|
||||
except ValueError:
|
||||
# Unknown platform, treat as local
|
||||
return cls(platform=Platform.LOCAL)
|
||||
|
||||
|
||||
def to_string(self) -> str:
|
||||
"""Convert back to string format."""
|
||||
if self.is_origin:
|
||||
@@ -99,15 +99,15 @@ class DeliveryTarget:
|
||||
class DeliveryRouter:
|
||||
"""
|
||||
Routes messages to appropriate destinations.
|
||||
|
||||
|
||||
Handles the logic of resolving delivery targets and dispatching
|
||||
messages to the right platform adapters.
|
||||
"""
|
||||
|
||||
def __init__(self, config: GatewayConfig, adapters: Dict[Platform, Any] = None):
|
||||
|
||||
def __init__(self, config: GatewayConfig, adapters: dict[Platform, Any] = None):
|
||||
"""
|
||||
Initialize the delivery router.
|
||||
|
||||
|
||||
Args:
|
||||
config: Gateway configuration
|
||||
adapters: Dict mapping platforms to their adapter instances
|
||||
@@ -115,31 +115,27 @@ class DeliveryRouter:
|
||||
self.config = config
|
||||
self.adapters = adapters or {}
|
||||
self.output_dir = Path.home() / ".hermes" / "cron" / "output"
|
||||
|
||||
def resolve_targets(
|
||||
self,
|
||||
deliver: Union[str, List[str]],
|
||||
origin: Optional[SessionSource] = None
|
||||
) -> List[DeliveryTarget]:
|
||||
|
||||
def resolve_targets(self, deliver: str | list[str], origin: SessionSource | None = None) -> list[DeliveryTarget]:
|
||||
"""
|
||||
Resolve delivery specification to concrete targets.
|
||||
|
||||
|
||||
Args:
|
||||
deliver: Delivery spec - "origin", "telegram", ["local", "discord"], etc.
|
||||
origin: The source where the request originated (for "origin" target)
|
||||
|
||||
|
||||
Returns:
|
||||
List of resolved delivery targets
|
||||
"""
|
||||
if isinstance(deliver, str):
|
||||
deliver = [deliver]
|
||||
|
||||
|
||||
targets = []
|
||||
seen_platforms = set()
|
||||
|
||||
|
||||
for target_str in deliver:
|
||||
target = DeliveryTarget.parse(target_str, origin)
|
||||
|
||||
|
||||
# Resolve home channel if needed
|
||||
if target.chat_id is None and target.platform != Platform.LOCAL:
|
||||
home = self.config.get_home_channel(target.platform)
|
||||
@@ -148,109 +144,96 @@ class DeliveryRouter:
|
||||
else:
|
||||
# No home channel configured, skip this platform
|
||||
continue
|
||||
|
||||
|
||||
# Deduplicate
|
||||
key = (target.platform, target.chat_id)
|
||||
if key not in seen_platforms:
|
||||
seen_platforms.add(key)
|
||||
targets.append(target)
|
||||
|
||||
|
||||
# Always include local if configured
|
||||
if self.config.always_log_local:
|
||||
local_key = (Platform.LOCAL, None)
|
||||
if local_key not in seen_platforms:
|
||||
targets.append(DeliveryTarget(platform=Platform.LOCAL))
|
||||
|
||||
|
||||
return targets
|
||||
|
||||
|
||||
async def deliver(
|
||||
self,
|
||||
content: str,
|
||||
targets: List[DeliveryTarget],
|
||||
job_id: Optional[str] = None,
|
||||
job_name: Optional[str] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
) -> Dict[str, Any]:
|
||||
targets: list[DeliveryTarget],
|
||||
job_id: str | None = None,
|
||||
job_name: str | None = None,
|
||||
metadata: dict[str, Any] | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Deliver content to all specified targets.
|
||||
|
||||
|
||||
Args:
|
||||
content: The message/output to deliver
|
||||
targets: List of delivery targets
|
||||
job_id: Optional job ID (for cron jobs)
|
||||
job_name: Optional job name
|
||||
metadata: Additional metadata to include
|
||||
|
||||
|
||||
Returns:
|
||||
Dict with delivery results per target
|
||||
"""
|
||||
results = {}
|
||||
|
||||
|
||||
for target in targets:
|
||||
try:
|
||||
if target.platform == Platform.LOCAL:
|
||||
result = self._deliver_local(content, job_id, job_name, metadata)
|
||||
else:
|
||||
result = await self._deliver_to_platform(target, content, metadata)
|
||||
|
||||
results[target.to_string()] = {
|
||||
"success": True,
|
||||
"result": result
|
||||
}
|
||||
|
||||
results[target.to_string()] = {"success": True, "result": result}
|
||||
except Exception as e:
|
||||
results[target.to_string()] = {
|
||||
"success": False,
|
||||
"error": str(e)
|
||||
}
|
||||
|
||||
results[target.to_string()] = {"success": False, "error": str(e)}
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def _deliver_local(
|
||||
self,
|
||||
content: str,
|
||||
job_id: Optional[str],
|
||||
job_name: Optional[str],
|
||||
metadata: Optional[Dict[str, Any]]
|
||||
) -> Dict[str, Any]:
|
||||
self, content: str, job_id: str | None, job_name: str | None, metadata: dict[str, Any] | None
|
||||
) -> dict[str, Any]:
|
||||
"""Save content to local files."""
|
||||
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
|
||||
|
||||
if job_id:
|
||||
output_path = self.output_dir / job_id / f"{timestamp}.md"
|
||||
else:
|
||||
output_path = self.output_dir / "misc" / f"{timestamp}.md"
|
||||
|
||||
|
||||
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
# Build the output document
|
||||
lines = []
|
||||
if job_name:
|
||||
lines.append(f"# {job_name}")
|
||||
else:
|
||||
lines.append("# Delivery Output")
|
||||
|
||||
|
||||
lines.append("")
|
||||
lines.append(f"**Timestamp:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
|
||||
|
||||
|
||||
if job_id:
|
||||
lines.append(f"**Job ID:** {job_id}")
|
||||
|
||||
|
||||
if metadata:
|
||||
for key, value in metadata.items():
|
||||
lines.append(f"**{key}:** {value}")
|
||||
|
||||
|
||||
lines.append("")
|
||||
lines.append("---")
|
||||
lines.append("")
|
||||
lines.append(content)
|
||||
|
||||
|
||||
output_path.write_text("\n".join(lines))
|
||||
|
||||
return {
|
||||
"path": str(output_path),
|
||||
"timestamp": timestamp
|
||||
}
|
||||
|
||||
|
||||
return {"path": str(output_path), "timestamp": timestamp}
|
||||
|
||||
def _save_full_output(self, content: str, job_id: str) -> Path:
|
||||
"""Save full cron output to disk and return the file path."""
|
||||
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
@@ -261,41 +244,33 @@ class DeliveryRouter:
|
||||
return path
|
||||
|
||||
async def _deliver_to_platform(
|
||||
self,
|
||||
target: DeliveryTarget,
|
||||
content: str,
|
||||
metadata: Optional[Dict[str, Any]]
|
||||
) -> Dict[str, Any]:
|
||||
self, target: DeliveryTarget, content: str, metadata: dict[str, Any] | None
|
||||
) -> dict[str, Any]:
|
||||
"""Deliver content to a messaging platform."""
|
||||
adapter = self.adapters.get(target.platform)
|
||||
|
||||
|
||||
if not adapter:
|
||||
raise ValueError(f"No adapter configured for {target.platform.value}")
|
||||
|
||||
|
||||
if not target.chat_id:
|
||||
raise ValueError(f"No chat ID for {target.platform.value} delivery")
|
||||
|
||||
|
||||
# Guard: truncate oversized cron output to stay within platform limits
|
||||
if len(content) > MAX_PLATFORM_OUTPUT:
|
||||
job_id = (metadata or {}).get("job_id", "unknown")
|
||||
saved_path = self._save_full_output(content, job_id)
|
||||
logger.info("Cron output truncated (%d chars) — full output: %s", len(content), saved_path)
|
||||
content = (
|
||||
content[:TRUNCATED_VISIBLE]
|
||||
+ f"\n\n... [truncated, full output saved to {saved_path}]"
|
||||
)
|
||||
|
||||
content = content[:TRUNCATED_VISIBLE] + f"\n\n... [truncated, full output saved to {saved_path}]"
|
||||
|
||||
return await adapter.send(target.chat_id, content, metadata=metadata)
|
||||
|
||||
|
||||
def parse_deliver_spec(
|
||||
deliver: Optional[Union[str, List[str]]],
|
||||
origin: Optional[SessionSource] = None,
|
||||
default: str = "origin"
|
||||
) -> Union[str, List[str]]:
|
||||
deliver: str | list[str] | None, origin: SessionSource | None = None, default: str = "origin"
|
||||
) -> str | list[str]:
|
||||
"""
|
||||
Normalize a delivery specification.
|
||||
|
||||
|
||||
If None or empty, returns the default.
|
||||
"""
|
||||
if not deliver:
|
||||
@@ -303,17 +278,14 @@ def parse_deliver_spec(
|
||||
return deliver
|
||||
|
||||
|
||||
def build_delivery_context_for_tool(
|
||||
config: GatewayConfig,
|
||||
origin: Optional[SessionSource] = None
|
||||
) -> Dict[str, Any]:
|
||||
def build_delivery_context_for_tool(config: GatewayConfig, origin: SessionSource | None = None) -> dict[str, Any]:
|
||||
"""
|
||||
Build context for the schedule_cronjob tool to understand delivery options.
|
||||
|
||||
|
||||
This is passed to the tool so it can validate and explain delivery targets.
|
||||
"""
|
||||
connected = config.get_connected_platforms()
|
||||
|
||||
|
||||
options = {
|
||||
"origin": {
|
||||
"description": "Back to where this job was created",
|
||||
@@ -322,9 +294,9 @@ def build_delivery_context_for_tool(
|
||||
"local": {
|
||||
"description": "Save to local files only",
|
||||
"available": True,
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
for platform in connected:
|
||||
home = config.get_home_channel(platform)
|
||||
options[platform.value] = {
|
||||
@@ -332,7 +304,7 @@ def build_delivery_context_for_tool(
|
||||
"available": True,
|
||||
"home_channel": home.to_dict() if home else None,
|
||||
}
|
||||
|
||||
|
||||
return {
|
||||
"origin": origin.to_dict() if origin else None,
|
||||
"options": options,
|
||||
|
||||
@@ -21,12 +21,12 @@ Errors in hooks are caught and logged but never block the main pipeline.
|
||||
import asyncio
|
||||
import importlib.util
|
||||
import os
|
||||
from collections.abc import Callable
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, Dict, List, Optional
|
||||
from typing import Any
|
||||
|
||||
import yaml
|
||||
|
||||
|
||||
HOOKS_DIR = Path(os.path.expanduser("~/.hermes/hooks"))
|
||||
|
||||
|
||||
@@ -42,11 +42,11 @@ class HookRegistry:
|
||||
|
||||
def __init__(self):
|
||||
# event_type -> [handler_fn, ...]
|
||||
self._handlers: Dict[str, List[Callable]] = {}
|
||||
self._loaded_hooks: List[dict] = [] # metadata for listing
|
||||
self._handlers: dict[str, list[Callable]] = {}
|
||||
self._loaded_hooks: list[dict] = [] # metadata for listing
|
||||
|
||||
@property
|
||||
def loaded_hooks(self) -> List[dict]:
|
||||
def loaded_hooks(self) -> list[dict]:
|
||||
"""Return metadata about all loaded hooks."""
|
||||
return list(self._loaded_hooks)
|
||||
|
||||
@@ -84,9 +84,7 @@ class HookRegistry:
|
||||
continue
|
||||
|
||||
# Dynamically load the handler module
|
||||
spec = importlib.util.spec_from_file_location(
|
||||
f"hermes_hook_{hook_name}", handler_path
|
||||
)
|
||||
spec = importlib.util.spec_from_file_location(f"hermes_hook_{hook_name}", handler_path)
|
||||
if spec is None or spec.loader is None:
|
||||
print(f"[hooks] Skipping {hook_name}: could not load handler.py", flush=True)
|
||||
continue
|
||||
@@ -103,19 +101,21 @@ class HookRegistry:
|
||||
for event in events:
|
||||
self._handlers.setdefault(event, []).append(handle_fn)
|
||||
|
||||
self._loaded_hooks.append({
|
||||
"name": hook_name,
|
||||
"description": manifest.get("description", ""),
|
||||
"events": events,
|
||||
"path": str(hook_dir),
|
||||
})
|
||||
self._loaded_hooks.append(
|
||||
{
|
||||
"name": hook_name,
|
||||
"description": manifest.get("description", ""),
|
||||
"events": events,
|
||||
"path": str(hook_dir),
|
||||
}
|
||||
)
|
||||
|
||||
print(f"[hooks] Loaded hook '{hook_name}' for events: {events}", flush=True)
|
||||
|
||||
except Exception as e:
|
||||
print(f"[hooks] Error loading hook {hook_dir.name}: {e}", flush=True)
|
||||
|
||||
async def emit(self, event_type: str, context: Optional[Dict[str, Any]] = None) -> None:
|
||||
async def emit(self, event_type: str, context: dict[str, Any] | None = None) -> None:
|
||||
"""
|
||||
Fire all handlers registered for an event.
|
||||
|
||||
|
||||
@@ -13,7 +13,6 @@ import json
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -61,7 +60,7 @@ def mirror_to_session(
|
||||
return False
|
||||
|
||||
|
||||
def _find_session_id(platform: str, chat_id: str) -> Optional[str]:
|
||||
def _find_session_id(platform: str, chat_id: str) -> str | None:
|
||||
"""
|
||||
Find the active session_id for a platform + chat_id pair.
|
||||
|
||||
@@ -113,6 +112,7 @@ def _append_to_sqlite(session_id: str, message: dict) -> None:
|
||||
"""Append a message to the SQLite session database."""
|
||||
try:
|
||||
from hermes_state import SessionDB
|
||||
|
||||
db = SessionDB()
|
||||
db.append_message(
|
||||
session_id=session_id,
|
||||
|
||||
@@ -23,21 +23,19 @@ import os
|
||||
import secrets
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
|
||||
# Unambiguous alphabet -- excludes 0/O, 1/I to prevent confusion
|
||||
ALPHABET = "ABCDEFGHJKLMNPQRSTUVWXYZ23456789"
|
||||
CODE_LENGTH = 8
|
||||
|
||||
# Timing constants
|
||||
CODE_TTL_SECONDS = 3600 # Codes expire after 1 hour
|
||||
RATE_LIMIT_SECONDS = 600 # 1 request per user per 10 minutes
|
||||
LOCKOUT_SECONDS = 3600 # Lockout duration after too many failures
|
||||
CODE_TTL_SECONDS = 3600 # Codes expire after 1 hour
|
||||
RATE_LIMIT_SECONDS = 600 # 1 request per user per 10 minutes
|
||||
LOCKOUT_SECONDS = 3600 # Lockout duration after too many failures
|
||||
|
||||
# Limits
|
||||
MAX_PENDING_PER_PLATFORM = 3 # Max pending codes per platform
|
||||
MAX_FAILED_ATTEMPTS = 5 # Failed approvals before lockout
|
||||
MAX_PENDING_PER_PLATFORM = 3 # Max pending codes per platform
|
||||
MAX_FAILED_ATTEMPTS = 5 # Failed approvals before lockout
|
||||
|
||||
PAIRING_DIR = Path(os.path.expanduser("~/.hermes/pairing"))
|
||||
|
||||
@@ -123,9 +121,7 @@ class PairingStore:
|
||||
|
||||
# ----- Pending codes -----
|
||||
|
||||
def generate_code(
|
||||
self, platform: str, user_id: str, user_name: str = ""
|
||||
) -> Optional[str]:
|
||||
def generate_code(self, platform: str, user_id: str, user_name: str = "") -> str | None:
|
||||
"""
|
||||
Generate a pairing code for a new user.
|
||||
|
||||
@@ -165,7 +161,7 @@ class PairingStore:
|
||||
|
||||
return code
|
||||
|
||||
def approve_code(self, platform: str, code: str) -> Optional[dict]:
|
||||
def approve_code(self, platform: str, code: str) -> dict | None:
|
||||
"""
|
||||
Approve a pairing code. Adds the user to the approved list.
|
||||
|
||||
@@ -199,13 +195,15 @@ class PairingStore:
|
||||
pending = self._load_json(self._pending_path(p))
|
||||
for code, info in pending.items():
|
||||
age_min = int((time.time() - info["created_at"]) / 60)
|
||||
results.append({
|
||||
"platform": p,
|
||||
"code": code,
|
||||
"user_id": info["user_id"],
|
||||
"user_name": info.get("user_name", ""),
|
||||
"age_minutes": age_min,
|
||||
})
|
||||
results.append(
|
||||
{
|
||||
"platform": p,
|
||||
"code": code,
|
||||
"user_id": info["user_id"],
|
||||
"user_name": info.get("user_name", ""),
|
||||
"age_minutes": age_min,
|
||||
}
|
||||
)
|
||||
return results
|
||||
|
||||
def clear_pending(self, platform: str = None) -> int:
|
||||
@@ -251,8 +249,11 @@ class PairingStore:
|
||||
lockout_key = f"_lockout:{platform}"
|
||||
limits[lockout_key] = time.time() + LOCKOUT_SECONDS
|
||||
limits[fail_key] = 0 # Reset counter
|
||||
print(f"[pairing] Platform {platform} locked out for {LOCKOUT_SECONDS}s "
|
||||
f"after {MAX_FAILED_ATTEMPTS} failed attempts", flush=True)
|
||||
print(
|
||||
f"[pairing] Platform {platform} locked out for {LOCKOUT_SECONDS}s "
|
||||
f"after {MAX_FAILED_ATTEMPTS} failed attempts",
|
||||
flush=True,
|
||||
)
|
||||
self._save_json(self._rate_limit_path(), limits)
|
||||
|
||||
# ----- Cleanup -----
|
||||
@@ -262,10 +263,7 @@ class PairingStore:
|
||||
path = self._pending_path(platform)
|
||||
pending = self._load_json(path)
|
||||
now = time.time()
|
||||
expired = [
|
||||
code for code, info in pending.items()
|
||||
if (now - info["created_at"]) > CODE_TTL_SECONDS
|
||||
]
|
||||
expired = [code for code, info in pending.items() if (now - info["created_at"]) > CODE_TTL_SECONDS]
|
||||
if expired:
|
||||
for code in expired:
|
||||
del pending[code]
|
||||
|
||||
@@ -303,8 +303,8 @@ Optional but valuable:
|
||||
After implementing everything, verify with:
|
||||
|
||||
```bash
|
||||
# All tests pass
|
||||
python -m pytest tests/ -q
|
||||
# All checks pass (lint + test)
|
||||
make check
|
||||
|
||||
# Grep for your platform name to find any missed integration points
|
||||
grep -r "telegram\|discord\|whatsapp\|slack" gateway/ tools/ agent/ cron/ hermes_cli/ toolsets.py \
|
||||
|
||||
@@ -13,20 +13,20 @@ import uuid
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
import sys
|
||||
from collections.abc import Awaitable, Callable
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Any, Callable, Awaitable, Tuple
|
||||
from enum import Enum
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from pathlib import Path as _Path
|
||||
from typing import Any
|
||||
|
||||
sys.path.insert(0, str(_Path(__file__).resolve().parents[2]))
|
||||
|
||||
from gateway.config import Platform, PlatformConfig
|
||||
from gateway.session import SessionSource
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Image cache utilities
|
||||
#
|
||||
@@ -251,7 +251,9 @@ def cleanup_document_cache(max_age_hours: int = 24) -> int:
|
||||
|
||||
class MessageType(Enum):
|
||||
"""Types of incoming messages."""
|
||||
|
||||
TEXT = "text"
|
||||
LOCATION = "location"
|
||||
PHOTO = "photo"
|
||||
VIDEO = "video"
|
||||
AUDIO = "audio"
|
||||
@@ -265,42 +267,43 @@ class MessageType(Enum):
|
||||
class MessageEvent:
|
||||
"""
|
||||
Incoming message from a platform.
|
||||
|
||||
|
||||
Normalized representation that all adapters produce.
|
||||
"""
|
||||
|
||||
# Message content
|
||||
text: str
|
||||
message_type: MessageType = MessageType.TEXT
|
||||
|
||||
|
||||
# Source information
|
||||
source: SessionSource = None
|
||||
|
||||
|
||||
# Original platform data
|
||||
raw_message: Any = None
|
||||
message_id: Optional[str] = None
|
||||
|
||||
message_id: str | None = None
|
||||
|
||||
# Media attachments
|
||||
media_urls: List[str] = field(default_factory=list)
|
||||
media_types: List[str] = field(default_factory=list)
|
||||
|
||||
media_urls: list[str] = field(default_factory=list)
|
||||
media_types: list[str] = field(default_factory=list)
|
||||
|
||||
# Reply context
|
||||
reply_to_message_id: Optional[str] = None
|
||||
|
||||
reply_to_message_id: str | None = None
|
||||
|
||||
# Timestamps
|
||||
timestamp: datetime = field(default_factory=datetime.now)
|
||||
|
||||
|
||||
def is_command(self) -> bool:
|
||||
"""Check if this is a command message (e.g., /new, /reset)."""
|
||||
return self.text.startswith("/")
|
||||
|
||||
def get_command(self) -> Optional[str]:
|
||||
|
||||
def get_command(self) -> str | None:
|
||||
"""Extract command name if this is a command message."""
|
||||
if not self.is_command():
|
||||
return None
|
||||
# Split on space and get first word, strip the /
|
||||
parts = self.text.split(maxsplit=1)
|
||||
return parts[0][1:].lower() if parts else None
|
||||
|
||||
|
||||
def get_command_args(self) -> str:
|
||||
"""Get the arguments after a command."""
|
||||
if not self.is_command():
|
||||
@@ -309,91 +312,88 @@ class MessageEvent:
|
||||
return parts[1] if len(parts) > 1 else ""
|
||||
|
||||
|
||||
@dataclass
|
||||
@dataclass
|
||||
class SendResult:
|
||||
"""Result of sending a message."""
|
||||
|
||||
success: bool
|
||||
message_id: Optional[str] = None
|
||||
error: Optional[str] = None
|
||||
message_id: str | None = None
|
||||
error: str | None = None
|
||||
raw_response: Any = None
|
||||
|
||||
|
||||
# Type for message handlers
|
||||
MessageHandler = Callable[[MessageEvent], Awaitable[Optional[str]]]
|
||||
MessageHandler = Callable[[MessageEvent], Awaitable[str | None]]
|
||||
|
||||
|
||||
class BasePlatformAdapter(ABC):
|
||||
"""
|
||||
Base class for platform adapters.
|
||||
|
||||
|
||||
Subclasses implement platform-specific logic for:
|
||||
- Connecting and authenticating
|
||||
- Receiving messages
|
||||
- Sending messages/responses
|
||||
- Handling media
|
||||
"""
|
||||
|
||||
|
||||
def __init__(self, config: PlatformConfig, platform: Platform):
|
||||
self.config = config
|
||||
self.platform = platform
|
||||
self._message_handler: Optional[MessageHandler] = None
|
||||
self._message_handler: MessageHandler | None = None
|
||||
self._running = False
|
||||
|
||||
|
||||
# Track active message handlers per session for interrupt support
|
||||
# Key: session_key (e.g., chat_id), Value: (event, asyncio.Event for interrupt)
|
||||
self._active_sessions: Dict[str, asyncio.Event] = {}
|
||||
self._pending_messages: Dict[str, MessageEvent] = {}
|
||||
|
||||
self._active_sessions: dict[str, asyncio.Event] = {}
|
||||
self._pending_messages: dict[str, MessageEvent] = {}
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
"""Human-readable name for this adapter."""
|
||||
return self.platform.value.title()
|
||||
|
||||
|
||||
@property
|
||||
def is_connected(self) -> bool:
|
||||
"""Check if adapter is currently connected."""
|
||||
return self._running
|
||||
|
||||
|
||||
def set_message_handler(self, handler: MessageHandler) -> None:
|
||||
"""
|
||||
Set the handler for incoming messages.
|
||||
|
||||
|
||||
The handler receives a MessageEvent and should return
|
||||
an optional response string.
|
||||
"""
|
||||
self._message_handler = handler
|
||||
|
||||
|
||||
@abstractmethod
|
||||
async def connect(self) -> bool:
|
||||
"""
|
||||
Connect to the platform and start receiving messages.
|
||||
|
||||
|
||||
Returns True if connection was successful.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
@abstractmethod
|
||||
async def disconnect(self) -> None:
|
||||
"""Disconnect from the platform."""
|
||||
pass
|
||||
|
||||
|
||||
@abstractmethod
|
||||
async def send(
|
||||
self,
|
||||
chat_id: str,
|
||||
content: str,
|
||||
reply_to: Optional[str] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
self, chat_id: str, content: str, reply_to: str | None = None, metadata: dict[str, Any] | None = None
|
||||
) -> SendResult:
|
||||
"""
|
||||
Send a message to a chat.
|
||||
|
||||
|
||||
Args:
|
||||
chat_id: The chat/channel ID to send to
|
||||
content: Message content (may be markdown)
|
||||
reply_to: Optional message ID to reply to
|
||||
metadata: Additional platform-specific options
|
||||
|
||||
|
||||
Returns:
|
||||
SendResult with success status and message ID
|
||||
"""
|
||||
@@ -415,21 +415,21 @@ class BasePlatformAdapter(ABC):
|
||||
async def send_typing(self, chat_id: str) -> None:
|
||||
"""
|
||||
Send a typing indicator.
|
||||
|
||||
|
||||
Override in subclasses if the platform supports it.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
async def send_image(
|
||||
self,
|
||||
chat_id: str,
|
||||
image_url: str,
|
||||
caption: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""
|
||||
Send an image natively via the platform API.
|
||||
|
||||
|
||||
Override in subclasses to send images as proper attachments
|
||||
instead of plain-text URLs. Default falls back to sending the
|
||||
URL as a text message.
|
||||
@@ -437,87 +437,91 @@ class BasePlatformAdapter(ABC):
|
||||
# Fallback: send URL as text (subclasses override for native images)
|
||||
text = f"{caption}\n{image_url}" if caption else image_url
|
||||
return await self.send(chat_id=chat_id, content=text, reply_to=reply_to)
|
||||
|
||||
|
||||
async def send_animation(
|
||||
self,
|
||||
chat_id: str,
|
||||
animation_url: str,
|
||||
caption: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""
|
||||
Send an animated GIF natively via the platform API.
|
||||
|
||||
|
||||
Override in subclasses to send GIFs as proper animations
|
||||
(e.g., Telegram send_animation) so they auto-play inline.
|
||||
Default falls back to send_image.
|
||||
"""
|
||||
return await self.send_image(chat_id=chat_id, image_url=animation_url, caption=caption, reply_to=reply_to)
|
||||
|
||||
|
||||
@staticmethod
|
||||
def _is_animation_url(url: str) -> bool:
|
||||
"""Check if a URL points to an animated GIF (vs a static image)."""
|
||||
lower = url.lower().split('?')[0] # Strip query params
|
||||
return lower.endswith('.gif')
|
||||
lower = url.lower().split("?")[0] # Strip query params
|
||||
return lower.endswith(".gif")
|
||||
|
||||
@staticmethod
|
||||
def extract_images(content: str) -> Tuple[List[Tuple[str, str]], str]:
|
||||
def extract_images(content: str) -> tuple[list[tuple[str, str]], str]:
|
||||
"""
|
||||
Extract image URLs from markdown and HTML image tags in a response.
|
||||
|
||||
|
||||
Finds patterns like:
|
||||
- 
|
||||
- <img src="https://example.com/image.png">
|
||||
- <img src="https://example.com/image.png"></img>
|
||||
|
||||
|
||||
Args:
|
||||
content: The response text to scan.
|
||||
|
||||
|
||||
Returns:
|
||||
Tuple of (list of (url, alt_text) pairs, cleaned content with image tags removed).
|
||||
"""
|
||||
images = []
|
||||
cleaned = content
|
||||
|
||||
|
||||
# Match markdown images: 
|
||||
md_pattern = r'!\[([^\]]*)\]\((https?://[^\s\)]+)\)'
|
||||
md_pattern = r"!\[([^\]]*)\]\((https?://[^\s\)]+)\)"
|
||||
for match in re.finditer(md_pattern, content):
|
||||
alt_text = match.group(1)
|
||||
url = match.group(2)
|
||||
# Only extract URLs that look like actual images
|
||||
if any(url.lower().endswith(ext) or ext in url.lower() for ext in
|
||||
['.png', '.jpg', '.jpeg', '.gif', '.webp', 'fal.media', 'fal-cdn', 'replicate.delivery']):
|
||||
if any(
|
||||
url.lower().endswith(ext) or ext in url.lower()
|
||||
for ext in [".png", ".jpg", ".jpeg", ".gif", ".webp", "fal.media", "fal-cdn", "replicate.delivery"]
|
||||
):
|
||||
images.append((url, alt_text))
|
||||
|
||||
|
||||
# Match HTML img tags: <img src="url"> or <img src="url"></img> or <img src="url"/>
|
||||
html_pattern = r'<img\s+src=["\']?(https?://[^\s"\'<>]+)["\']?\s*/?>\s*(?:</img>)?'
|
||||
for match in re.finditer(html_pattern, content):
|
||||
url = match.group(1)
|
||||
images.append((url, ""))
|
||||
|
||||
|
||||
# Remove only the matched image tags from content (not all markdown images)
|
||||
if images:
|
||||
extracted_urls = {url for url, _ in images}
|
||||
|
||||
def _remove_if_extracted(match):
|
||||
url = match.group(2) if match.lastindex >= 2 else match.group(1)
|
||||
return '' if url in extracted_urls else match.group(0)
|
||||
return "" if url in extracted_urls else match.group(0)
|
||||
|
||||
cleaned = re.sub(md_pattern, _remove_if_extracted, cleaned)
|
||||
cleaned = re.sub(html_pattern, _remove_if_extracted, cleaned)
|
||||
# Clean up leftover blank lines
|
||||
cleaned = re.sub(r'\n{3,}', '\n\n', cleaned).strip()
|
||||
|
||||
cleaned = re.sub(r"\n{3,}", "\n\n", cleaned).strip()
|
||||
|
||||
return images, cleaned
|
||||
|
||||
|
||||
async def send_voice(
|
||||
self,
|
||||
chat_id: str,
|
||||
audio_path: str,
|
||||
caption: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""
|
||||
Send an audio file as a native voice message via the platform API.
|
||||
|
||||
|
||||
Override in subclasses to send audio as voice bubbles (Telegram)
|
||||
or file attachments (Discord). Default falls back to sending the
|
||||
file path as text.
|
||||
@@ -531,8 +535,8 @@ class BasePlatformAdapter(ABC):
|
||||
self,
|
||||
chat_id: str,
|
||||
video_path: str,
|
||||
caption: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""
|
||||
Send a video natively via the platform API.
|
||||
@@ -549,9 +553,9 @@ class BasePlatformAdapter(ABC):
|
||||
self,
|
||||
chat_id: str,
|
||||
file_path: str,
|
||||
caption: Optional[str] = None,
|
||||
file_name: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
file_name: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""
|
||||
Send a document/file natively via the platform API.
|
||||
@@ -568,8 +572,8 @@ class BasePlatformAdapter(ABC):
|
||||
self,
|
||||
chat_id: str,
|
||||
image_path: str,
|
||||
caption: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""
|
||||
Send a local image file natively via the platform API.
|
||||
@@ -584,45 +588,45 @@ class BasePlatformAdapter(ABC):
|
||||
return await self.send(chat_id=chat_id, content=text, reply_to=reply_to)
|
||||
|
||||
@staticmethod
|
||||
def extract_media(content: str) -> Tuple[List[Tuple[str, bool]], str]:
|
||||
def extract_media(content: str) -> tuple[list[tuple[str, bool]], str]:
|
||||
"""
|
||||
Extract MEDIA:<path> tags and [[audio_as_voice]] directives from response text.
|
||||
|
||||
|
||||
The TTS tool returns responses like:
|
||||
[[audio_as_voice]]
|
||||
MEDIA:/path/to/audio.ogg
|
||||
|
||||
|
||||
Args:
|
||||
content: The response text to scan.
|
||||
|
||||
|
||||
Returns:
|
||||
Tuple of (list of (path, is_voice) pairs, cleaned content with tags removed).
|
||||
"""
|
||||
media = []
|
||||
cleaned = content
|
||||
|
||||
|
||||
# Check for [[audio_as_voice]] directive
|
||||
has_voice_tag = "[[audio_as_voice]]" in content
|
||||
cleaned = cleaned.replace("[[audio_as_voice]]", "")
|
||||
|
||||
|
||||
# Extract MEDIA:<path> tags (path may contain spaces)
|
||||
media_pattern = r'MEDIA:(\S+)'
|
||||
media_pattern = r"MEDIA:(\S+)"
|
||||
for match in re.finditer(media_pattern, content):
|
||||
path = match.group(1).strip()
|
||||
if path:
|
||||
media.append((path, has_voice_tag))
|
||||
|
||||
|
||||
# Remove MEDIA tags from content
|
||||
if media:
|
||||
cleaned = re.sub(media_pattern, '', cleaned)
|
||||
cleaned = re.sub(r'\n{3,}', '\n\n', cleaned).strip()
|
||||
|
||||
cleaned = re.sub(media_pattern, "", cleaned)
|
||||
cleaned = re.sub(r"\n{3,}", "\n\n", cleaned).strip()
|
||||
|
||||
return media, cleaned
|
||||
|
||||
|
||||
async def _keep_typing(self, chat_id: str, interval: float = 2.0) -> None:
|
||||
"""
|
||||
Continuously send typing indicator until cancelled.
|
||||
|
||||
|
||||
Telegram/Discord typing status expires after ~5 seconds, so we refresh every 2
|
||||
to recover quickly after progress messages interrupt it.
|
||||
"""
|
||||
@@ -632,20 +636,20 @@ class BasePlatformAdapter(ABC):
|
||||
await asyncio.sleep(interval)
|
||||
except asyncio.CancelledError:
|
||||
pass # Normal cancellation when handler completes
|
||||
|
||||
|
||||
async def handle_message(self, event: MessageEvent) -> None:
|
||||
"""
|
||||
Process an incoming message.
|
||||
|
||||
|
||||
This method returns quickly by spawning background tasks.
|
||||
This allows new messages to be processed even while an agent is running,
|
||||
enabling interruption support.
|
||||
"""
|
||||
if not self._message_handler:
|
||||
return
|
||||
|
||||
|
||||
session_key = event.source.chat_id
|
||||
|
||||
|
||||
# Check if there's already an active handler for this session
|
||||
if session_key in self._active_sessions:
|
||||
# Store this as a pending message - it will interrupt the running agent
|
||||
@@ -654,10 +658,10 @@ class BasePlatformAdapter(ABC):
|
||||
# Signal the interrupt (the processing task checks this)
|
||||
self._active_sessions[session_key].set()
|
||||
return # Don't process now - will be handled after current task finishes
|
||||
|
||||
|
||||
# Spawn background task to process this message
|
||||
asyncio.create_task(self._process_message_background(event, session_key))
|
||||
|
||||
|
||||
@staticmethod
|
||||
def _get_human_delay() -> float:
|
||||
"""
|
||||
@@ -684,35 +688,40 @@ class BasePlatformAdapter(ABC):
|
||||
# Create interrupt event for this session
|
||||
interrupt_event = asyncio.Event()
|
||||
self._active_sessions[session_key] = interrupt_event
|
||||
|
||||
|
||||
# Start continuous typing indicator (refreshes every 2 seconds)
|
||||
typing_task = asyncio.create_task(self._keep_typing(event.source.chat_id))
|
||||
|
||||
|
||||
try:
|
||||
# Call the handler (this can take a while with tool calls)
|
||||
response = await self._message_handler(event)
|
||||
|
||||
|
||||
# Send response if any
|
||||
if not response:
|
||||
logger.warning("[%s] Handler returned empty/None response for %s", self.name, event.source.chat_id)
|
||||
if response:
|
||||
# Extract MEDIA:<path> tags (from TTS tool) before other processing
|
||||
media_files, response = self.extract_media(response)
|
||||
|
||||
|
||||
# Extract image URLs and send them as native platform attachments
|
||||
images, text_content = self.extract_images(response)
|
||||
if images:
|
||||
logger.info("[%s] extract_images found %d image(s) in response (%d chars)", self.name, len(images), len(response))
|
||||
|
||||
logger.info(
|
||||
"[%s] extract_images found %d image(s) in response (%d chars)",
|
||||
self.name,
|
||||
len(images),
|
||||
len(response),
|
||||
)
|
||||
|
||||
# Send the text portion first (if any remains after extractions)
|
||||
if text_content:
|
||||
logger.info("[%s] Sending response (%d chars) to %s", self.name, len(text_content), event.source.chat_id)
|
||||
result = await self.send(
|
||||
chat_id=event.source.chat_id,
|
||||
content=text_content,
|
||||
reply_to=event.message_id
|
||||
logger.info(
|
||||
"[%s] Sending response (%d chars) to %s", self.name, len(text_content), event.source.chat_id
|
||||
)
|
||||
|
||||
result = await self.send(
|
||||
chat_id=event.source.chat_id, content=text_content, reply_to=event.message_id
|
||||
)
|
||||
|
||||
# Log send failures (don't raise - user already saw tool progress)
|
||||
if not result.success:
|
||||
print(f"[{self.name}] Failed to send response: {result.error}")
|
||||
@@ -720,14 +729,14 @@ class BasePlatformAdapter(ABC):
|
||||
fallback_result = await self.send(
|
||||
chat_id=event.source.chat_id,
|
||||
content=f"(Response formatting failed, plain text:)\n\n{text_content[:3500]}",
|
||||
reply_to=event.message_id
|
||||
reply_to=event.message_id,
|
||||
)
|
||||
if not fallback_result.success:
|
||||
print(f"[{self.name}] Fallback send also failed: {fallback_result.error}")
|
||||
|
||||
|
||||
# Human-like pacing delay between text and media
|
||||
human_delay = self._get_human_delay()
|
||||
|
||||
|
||||
# Send extracted images as native attachments
|
||||
if images:
|
||||
logger.info("[%s] Extracted %d image(s) to send as attachments", self.name, len(images))
|
||||
@@ -735,7 +744,12 @@ class BasePlatformAdapter(ABC):
|
||||
if human_delay > 0:
|
||||
await asyncio.sleep(human_delay)
|
||||
try:
|
||||
logger.info("[%s] Sending image: %s (alt=%s)", self.name, image_url[:80], alt_text[:30] if alt_text else "")
|
||||
logger.info(
|
||||
"[%s] Sending image: %s (alt=%s)",
|
||||
self.name,
|
||||
image_url[:80],
|
||||
alt_text[:30] if alt_text else "",
|
||||
)
|
||||
# Route animated GIFs through send_animation for proper playback
|
||||
if self._is_animation_url(image_url):
|
||||
img_result = await self.send_animation(
|
||||
@@ -753,11 +767,11 @@ class BasePlatformAdapter(ABC):
|
||||
logger.error("[%s] Failed to send image: %s", self.name, img_result.error)
|
||||
except Exception as img_err:
|
||||
logger.error("[%s] Error sending image: %s", self.name, img_err, exc_info=True)
|
||||
|
||||
|
||||
# Send extracted media files — route by file type
|
||||
_AUDIO_EXTS = {'.ogg', '.opus', '.mp3', '.wav', '.m4a'}
|
||||
_VIDEO_EXTS = {'.mp4', '.mov', '.avi', '.mkv', '.3gp'}
|
||||
_IMAGE_EXTS = {'.jpg', '.jpeg', '.png', '.webp', '.gif'}
|
||||
_AUDIO_EXTS = {".ogg", ".opus", ".mp3", ".wav", ".m4a"}
|
||||
_VIDEO_EXTS = {".mp4", ".mov", ".avi", ".mkv", ".3gp"}
|
||||
_IMAGE_EXTS = {".jpg", ".jpeg", ".png", ".webp", ".gif"}
|
||||
|
||||
for media_path, is_voice in media_files:
|
||||
if human_delay > 0:
|
||||
@@ -789,7 +803,7 @@ class BasePlatformAdapter(ABC):
|
||||
print(f"[{self.name}] Failed to send media ({ext}): {media_result.error}")
|
||||
except Exception as media_err:
|
||||
print(f"[{self.name}] Error sending media: {media_err}")
|
||||
|
||||
|
||||
# Check if there's a pending message that was queued during our processing
|
||||
if session_key in self._pending_messages:
|
||||
pending_event = self._pending_messages.pop(session_key)
|
||||
@@ -805,10 +819,11 @@ class BasePlatformAdapter(ABC):
|
||||
# Process pending message in new background task
|
||||
await self._process_message_background(pending_event, session_key)
|
||||
return # Already cleaned up
|
||||
|
||||
|
||||
except Exception as e:
|
||||
print(f"[{self.name}] Error handling message: {e}")
|
||||
import traceback
|
||||
|
||||
traceback.print_exc()
|
||||
finally:
|
||||
# Stop typing indicator
|
||||
@@ -820,26 +835,26 @@ class BasePlatformAdapter(ABC):
|
||||
# Clean up session tracking
|
||||
if session_key in self._active_sessions:
|
||||
del self._active_sessions[session_key]
|
||||
|
||||
|
||||
def has_pending_interrupt(self, session_key: str) -> bool:
|
||||
"""Check if there's a pending interrupt for a session."""
|
||||
return session_key in self._active_sessions and self._active_sessions[session_key].is_set()
|
||||
|
||||
def get_pending_message(self, session_key: str) -> Optional[MessageEvent]:
|
||||
|
||||
def get_pending_message(self, session_key: str) -> MessageEvent | None:
|
||||
"""Get and clear any pending message for a session."""
|
||||
return self._pending_messages.pop(session_key, None)
|
||||
|
||||
|
||||
def build_source(
|
||||
self,
|
||||
chat_id: str,
|
||||
chat_name: Optional[str] = None,
|
||||
chat_name: str | None = None,
|
||||
chat_type: str = "dm",
|
||||
user_id: Optional[str] = None,
|
||||
user_name: Optional[str] = None,
|
||||
thread_id: Optional[str] = None,
|
||||
chat_topic: Optional[str] = None,
|
||||
user_id_alt: Optional[str] = None,
|
||||
chat_id_alt: Optional[str] = None,
|
||||
user_id: str | None = None,
|
||||
user_name: str | None = None,
|
||||
thread_id: str | None = None,
|
||||
chat_topic: str | None = None,
|
||||
user_id_alt: str | None = None,
|
||||
chat_id_alt: str | None = None,
|
||||
) -> SessionSource:
|
||||
"""Helper to build a SessionSource for this platform."""
|
||||
# Normalize empty topic to None
|
||||
@@ -857,30 +872,30 @@ class BasePlatformAdapter(ABC):
|
||||
user_id_alt=user_id_alt,
|
||||
chat_id_alt=chat_id_alt,
|
||||
)
|
||||
|
||||
|
||||
@abstractmethod
|
||||
async def get_chat_info(self, chat_id: str) -> Dict[str, Any]:
|
||||
async def get_chat_info(self, chat_id: str) -> dict[str, Any]:
|
||||
"""
|
||||
Get information about a chat/channel.
|
||||
|
||||
|
||||
Returns dict with at least:
|
||||
- name: Chat name
|
||||
- type: "dm", "group", "channel"
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
def format_message(self, content: str) -> str:
|
||||
"""
|
||||
Format a message for this platform.
|
||||
|
||||
|
||||
Override in subclasses to handle platform-specific formatting
|
||||
(e.g., Telegram MarkdownV2, Discord markdown).
|
||||
|
||||
|
||||
Default implementation returns content as-is.
|
||||
"""
|
||||
return content
|
||||
|
||||
def truncate_message(self, content: str, max_length: int = 4096) -> List[str]:
|
||||
|
||||
def truncate_message(self, content: str, max_length: int = 4096) -> list[str]:
|
||||
"""
|
||||
Split a long message into chunks, preserving code block boundaries.
|
||||
|
||||
@@ -899,14 +914,14 @@ class BasePlatformAdapter(ABC):
|
||||
if len(content) <= max_length:
|
||||
return [content]
|
||||
|
||||
INDICATOR_RESERVE = 10 # room for " (XX/XX)"
|
||||
INDICATOR_RESERVE = 10 # room for " (XX/XX)"
|
||||
FENCE_CLOSE = "\n```"
|
||||
|
||||
chunks: List[str] = []
|
||||
chunks: list[str] = []
|
||||
remaining = content
|
||||
# When the previous chunk ended mid-code-block, this holds the
|
||||
# language tag (possibly "") so we can reopen the fence.
|
||||
carry_lang: Optional[str] = None
|
||||
carry_lang: str | None = None
|
||||
|
||||
while remaining:
|
||||
# If we're continuing a code block from the previous chunk,
|
||||
@@ -964,8 +979,6 @@ class BasePlatformAdapter(ABC):
|
||||
# Append chunk indicators when the response spans multiple messages
|
||||
if len(chunks) > 1:
|
||||
total = len(chunks)
|
||||
chunks = [
|
||||
f"{chunk} ({i + 1}/{total})" for i, chunk in enumerate(chunks)
|
||||
]
|
||||
chunks = [f"{chunk} ({i + 1}/{total})" for i, chunk in enumerate(chunks)]
|
||||
|
||||
return chunks
|
||||
|
||||
@@ -10,14 +10,16 @@ Uses discord.py library for:
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
from typing import Dict, List, Optional, Any
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
try:
|
||||
import discord
|
||||
from discord import Message as DiscordMessage, Intents
|
||||
from discord import Intents
|
||||
from discord import Message as DiscordMessage
|
||||
from discord.ext import commands
|
||||
|
||||
DISCORD_AVAILABLE = True
|
||||
except ImportError:
|
||||
DISCORD_AVAILABLE = False
|
||||
@@ -28,6 +30,7 @@ except ImportError:
|
||||
|
||||
import sys
|
||||
from pathlib import Path as _Path
|
||||
|
||||
sys.path.insert(0, str(_Path(__file__).resolve().parents[2]))
|
||||
|
||||
from gateway.config import Platform, PlatformConfig
|
||||
@@ -36,8 +39,8 @@ from gateway.platforms.base import (
|
||||
MessageEvent,
|
||||
MessageType,
|
||||
SendResult,
|
||||
cache_image_from_url,
|
||||
cache_audio_from_url,
|
||||
cache_image_from_url,
|
||||
)
|
||||
|
||||
|
||||
@@ -49,7 +52,7 @@ def check_discord_requirements() -> bool:
|
||||
class DiscordAdapter(BasePlatformAdapter):
|
||||
"""
|
||||
Discord bot adapter.
|
||||
|
||||
|
||||
Handles:
|
||||
- Receiving messages from servers and DMs
|
||||
- Sending responses with Discord markdown
|
||||
@@ -59,26 +62,26 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
- Auto-threading for long conversations
|
||||
- Reaction-based feedback
|
||||
"""
|
||||
|
||||
|
||||
# Discord message limits
|
||||
MAX_MESSAGE_LENGTH = 2000
|
||||
|
||||
|
||||
def __init__(self, config: PlatformConfig):
|
||||
super().__init__(config, Platform.DISCORD)
|
||||
self._client: Optional[commands.Bot] = None
|
||||
self._client: commands.Bot | None = None
|
||||
self._ready_event = asyncio.Event()
|
||||
self._allowed_user_ids: set = set() # For button approval authorization
|
||||
|
||||
|
||||
async def connect(self) -> bool:
|
||||
"""Connect to Discord and start receiving events."""
|
||||
if not DISCORD_AVAILABLE:
|
||||
print(f"[{self.name}] discord.py not installed. Run: pip install discord.py")
|
||||
return False
|
||||
|
||||
|
||||
if not self.config.token:
|
||||
print(f"[{self.name}] No bot token configured")
|
||||
return False
|
||||
|
||||
|
||||
try:
|
||||
# Set up intents -- members intent needed for username-to-ID resolution
|
||||
intents = Intents.default()
|
||||
@@ -86,30 +89,28 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
intents.dm_messages = True
|
||||
intents.guild_messages = True
|
||||
intents.members = True
|
||||
|
||||
|
||||
# Create bot
|
||||
self._client = commands.Bot(
|
||||
command_prefix="!", # Not really used, we handle raw messages
|
||||
intents=intents,
|
||||
)
|
||||
|
||||
|
||||
# Parse allowed user entries (may contain usernames or IDs)
|
||||
allowed_env = os.getenv("DISCORD_ALLOWED_USERS", "")
|
||||
if allowed_env:
|
||||
self._allowed_user_ids = {
|
||||
uid.strip() for uid in allowed_env.split(",") if uid.strip()
|
||||
}
|
||||
|
||||
self._allowed_user_ids = {uid.strip() for uid in allowed_env.split(",") if uid.strip()}
|
||||
|
||||
adapter_self = self # capture for closure
|
||||
|
||||
|
||||
# Register event handlers
|
||||
@self._client.event
|
||||
async def on_ready():
|
||||
print(f"[{adapter_self.name}] Connected as {adapter_self._client.user}")
|
||||
|
||||
|
||||
# Resolve any usernames in the allowed list to numeric IDs
|
||||
await adapter_self._resolve_allowed_usernames()
|
||||
|
||||
|
||||
# Sync slash commands with Discord
|
||||
try:
|
||||
synced = await adapter_self._client.tree.sync()
|
||||
@@ -117,33 +118,33 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
except Exception as e:
|
||||
print(f"[{adapter_self.name}] Slash command sync failed: {e}")
|
||||
adapter_self._ready_event.set()
|
||||
|
||||
|
||||
@self._client.event
|
||||
async def on_message(message: DiscordMessage):
|
||||
# Ignore bot's own messages
|
||||
if message.author == self._client.user:
|
||||
return
|
||||
await self._handle_message(message)
|
||||
|
||||
|
||||
# Register slash commands
|
||||
self._register_slash_commands()
|
||||
|
||||
|
||||
# Start the bot in background
|
||||
asyncio.create_task(self._client.start(self.config.token))
|
||||
|
||||
|
||||
# Wait for ready
|
||||
await asyncio.wait_for(self._ready_event.wait(), timeout=30)
|
||||
|
||||
|
||||
self._running = True
|
||||
return True
|
||||
|
||||
except asyncio.TimeoutError:
|
||||
|
||||
except TimeoutError:
|
||||
print(f"[{self.name}] Timeout waiting for connection")
|
||||
return False
|
||||
except Exception as e:
|
||||
print(f"[{self.name}] Failed to connect: {e}")
|
||||
return False
|
||||
|
||||
|
||||
async def disconnect(self) -> None:
|
||||
"""Disconnect from Discord."""
|
||||
if self._client:
|
||||
@@ -151,59 +152,55 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
await self._client.close()
|
||||
except Exception as e:
|
||||
print(f"[{self.name}] Error during disconnect: {e}")
|
||||
|
||||
|
||||
self._running = False
|
||||
self._client = None
|
||||
self._ready_event.clear()
|
||||
print(f"[{self.name}] Disconnected")
|
||||
|
||||
|
||||
async def send(
|
||||
self,
|
||||
chat_id: str,
|
||||
content: str,
|
||||
reply_to: Optional[str] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
self, chat_id: str, content: str, reply_to: str | None = None, metadata: dict[str, Any] | None = None
|
||||
) -> SendResult:
|
||||
"""Send a message to a Discord channel."""
|
||||
if not self._client:
|
||||
return SendResult(success=False, error="Not connected")
|
||||
|
||||
|
||||
try:
|
||||
# Get the channel
|
||||
channel = self._client.get_channel(int(chat_id))
|
||||
if not channel:
|
||||
channel = await self._client.fetch_channel(int(chat_id))
|
||||
|
||||
|
||||
if not channel:
|
||||
return SendResult(success=False, error=f"Channel {chat_id} not found")
|
||||
|
||||
|
||||
# Format and split message if needed
|
||||
formatted = self.format_message(content)
|
||||
chunks = self.truncate_message(formatted, self.MAX_MESSAGE_LENGTH)
|
||||
|
||||
|
||||
message_ids = []
|
||||
reference = None
|
||||
|
||||
|
||||
if reply_to:
|
||||
try:
|
||||
ref_msg = await channel.fetch_message(int(reply_to))
|
||||
reference = ref_msg
|
||||
except Exception as e:
|
||||
logger.debug("Could not fetch reply-to message: %s", e)
|
||||
|
||||
|
||||
for i, chunk in enumerate(chunks):
|
||||
msg = await channel.send(
|
||||
content=chunk,
|
||||
reference=reference if i == 0 else None,
|
||||
)
|
||||
message_ids.append(str(msg.id))
|
||||
|
||||
|
||||
return SendResult(
|
||||
success=True,
|
||||
message_id=message_ids[0] if message_ids else None,
|
||||
raw_response={"message_ids": message_ids}
|
||||
raw_response={"message_ids": message_ids},
|
||||
)
|
||||
|
||||
|
||||
except Exception as e:
|
||||
return SendResult(success=False, error=str(e))
|
||||
|
||||
@@ -223,7 +220,7 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
msg = await channel.fetch_message(int(message_id))
|
||||
formatted = self.format_message(content)
|
||||
if len(formatted) > self.MAX_MESSAGE_LENGTH:
|
||||
formatted = formatted[:self.MAX_MESSAGE_LENGTH - 3] + "..."
|
||||
formatted = formatted[: self.MAX_MESSAGE_LENGTH - 3] + "..."
|
||||
await msg.edit(content=formatted)
|
||||
return SendResult(success=True, message_id=message_id)
|
||||
except Exception as e:
|
||||
@@ -233,28 +230,28 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
self,
|
||||
chat_id: str,
|
||||
audio_path: str,
|
||||
caption: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""Send audio as a Discord file attachment."""
|
||||
if not self._client:
|
||||
return SendResult(success=False, error="Not connected")
|
||||
|
||||
|
||||
try:
|
||||
import io
|
||||
|
||||
|
||||
channel = self._client.get_channel(int(chat_id))
|
||||
if not channel:
|
||||
channel = await self._client.fetch_channel(int(chat_id))
|
||||
if not channel:
|
||||
return SendResult(success=False, error=f"Channel {chat_id} not found")
|
||||
|
||||
|
||||
if not os.path.exists(audio_path):
|
||||
return SendResult(success=False, error=f"Audio file not found: {audio_path}")
|
||||
|
||||
|
||||
# Determine filename from path
|
||||
filename = os.path.basename(audio_path)
|
||||
|
||||
|
||||
with open(audio_path, "rb") as f:
|
||||
file = discord.File(io.BytesIO(f.read()), filename=filename)
|
||||
msg = await channel.send(
|
||||
@@ -262,36 +259,36 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
file=file,
|
||||
)
|
||||
return SendResult(success=True, message_id=str(msg.id))
|
||||
|
||||
|
||||
except Exception as e:
|
||||
print(f"[{self.name}] Failed to send audio: {e}")
|
||||
return await super().send_voice(chat_id, audio_path, caption, reply_to)
|
||||
|
||||
|
||||
async def send_image_file(
|
||||
self,
|
||||
chat_id: str,
|
||||
image_path: str,
|
||||
caption: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""Send a local image file natively as a Discord file attachment."""
|
||||
if not self._client:
|
||||
return SendResult(success=False, error="Not connected")
|
||||
|
||||
|
||||
try:
|
||||
import io
|
||||
|
||||
|
||||
channel = self._client.get_channel(int(chat_id))
|
||||
if not channel:
|
||||
channel = await self._client.fetch_channel(int(chat_id))
|
||||
if not channel:
|
||||
return SendResult(success=False, error=f"Channel {chat_id} not found")
|
||||
|
||||
|
||||
if not os.path.exists(image_path):
|
||||
return SendResult(success=False, error=f"Image file not found: {image_path}")
|
||||
|
||||
|
||||
filename = os.path.basename(image_path)
|
||||
|
||||
|
||||
with open(image_path, "rb") as f:
|
||||
file = discord.File(io.BytesIO(f.read()), filename=filename)
|
||||
msg = await channel.send(
|
||||
@@ -299,7 +296,7 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
file=file,
|
||||
)
|
||||
return SendResult(success=True, message_id=str(msg.id))
|
||||
|
||||
|
||||
except Exception as e:
|
||||
print(f"[{self.name}] Failed to send local image: {e}")
|
||||
return await super().send_image_file(chat_id, image_path, caption, reply_to)
|
||||
@@ -308,31 +305,31 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
self,
|
||||
chat_id: str,
|
||||
image_url: str,
|
||||
caption: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""Send an image natively as a Discord file attachment."""
|
||||
if not self._client:
|
||||
return SendResult(success=False, error="Not connected")
|
||||
|
||||
|
||||
try:
|
||||
import aiohttp
|
||||
|
||||
|
||||
channel = self._client.get_channel(int(chat_id))
|
||||
if not channel:
|
||||
channel = await self._client.fetch_channel(int(chat_id))
|
||||
if not channel:
|
||||
return SendResult(success=False, error=f"Channel {chat_id} not found")
|
||||
|
||||
|
||||
# Download the image and send as a Discord file attachment
|
||||
# (Discord renders attachments inline, unlike plain URLs)
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(image_url, timeout=aiohttp.ClientTimeout(total=30)) as resp:
|
||||
if resp.status != 200:
|
||||
raise Exception(f"Failed to download image: HTTP {resp.status}")
|
||||
|
||||
|
||||
image_data = await resp.read()
|
||||
|
||||
|
||||
# Determine filename from URL or content type
|
||||
content_type = resp.headers.get("content-type", "image/png")
|
||||
ext = "png"
|
||||
@@ -342,23 +339,24 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
ext = "gif"
|
||||
elif "webp" in content_type:
|
||||
ext = "webp"
|
||||
|
||||
|
||||
import io
|
||||
|
||||
file = discord.File(io.BytesIO(image_data), filename=f"image.{ext}")
|
||||
|
||||
|
||||
msg = await channel.send(
|
||||
content=caption if caption else None,
|
||||
file=file,
|
||||
)
|
||||
return SendResult(success=True, message_id=str(msg.id))
|
||||
|
||||
|
||||
except ImportError:
|
||||
print(f"[{self.name}] aiohttp not installed, falling back to URL. Run: pip install aiohttp")
|
||||
return await super().send_image(chat_id, image_url, caption, reply_to)
|
||||
except Exception as e:
|
||||
print(f"[{self.name}] Failed to send image attachment, falling back to URL: {e}")
|
||||
return await super().send_image(chat_id, image_url, caption, reply_to)
|
||||
|
||||
|
||||
async def send_typing(self, chat_id: str) -> None:
|
||||
"""Send typing indicator."""
|
||||
if self._client:
|
||||
@@ -368,20 +366,20 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
await channel.typing()
|
||||
except Exception:
|
||||
pass # Ignore typing indicator failures
|
||||
|
||||
async def get_chat_info(self, chat_id: str) -> Dict[str, Any]:
|
||||
|
||||
async def get_chat_info(self, chat_id: str) -> dict[str, Any]:
|
||||
"""Get information about a Discord channel."""
|
||||
if not self._client:
|
||||
return {"name": "Unknown", "type": "dm"}
|
||||
|
||||
|
||||
try:
|
||||
channel = self._client.get_channel(int(chat_id))
|
||||
if not channel:
|
||||
channel = await self._client.fetch_channel(int(chat_id))
|
||||
|
||||
|
||||
if not channel:
|
||||
return {"name": str(chat_id), "type": "dm"}
|
||||
|
||||
|
||||
# Determine channel type
|
||||
if isinstance(channel, discord.DMChannel):
|
||||
chat_type = "dm"
|
||||
@@ -397,7 +395,7 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
else:
|
||||
chat_type = "channel"
|
||||
name = getattr(channel, "name", str(chat_id))
|
||||
|
||||
|
||||
return {
|
||||
"name": name,
|
||||
"type": chat_type,
|
||||
@@ -406,7 +404,7 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
}
|
||||
except Exception as e:
|
||||
return {"name": str(chat_id), "type": "dm", "error": str(e)}
|
||||
|
||||
|
||||
async def _resolve_allowed_usernames(self) -> None:
|
||||
"""
|
||||
Resolve non-numeric entries in DISCORD_ALLOWED_USERS to Discord user IDs.
|
||||
@@ -453,8 +451,10 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
uid = str(member.id)
|
||||
numeric_ids.add(uid)
|
||||
resolved_count += 1
|
||||
matched_name = name_lower if name_lower in to_resolve else (
|
||||
display_lower if display_lower in to_resolve else global_lower
|
||||
matched_name = (
|
||||
name_lower
|
||||
if name_lower in to_resolve
|
||||
else (display_lower if display_lower in to_resolve else global_lower)
|
||||
)
|
||||
to_resolve.discard(matched_name)
|
||||
print(f"[{self.name}] Resolved '{matched_name}' -> {uid} ({member.name}#{member.discriminator})")
|
||||
@@ -474,12 +474,12 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
def format_message(self, content: str) -> str:
|
||||
"""
|
||||
Format message for Discord.
|
||||
|
||||
|
||||
Discord uses its own markdown variant.
|
||||
"""
|
||||
# Discord markdown is fairly standard, no special escaping needed
|
||||
return content
|
||||
|
||||
|
||||
def _register_slash_commands(self) -> None:
|
||||
"""Register Discord slash commands on the command tree."""
|
||||
if not self._client:
|
||||
@@ -694,7 +694,7 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
chat_name = interaction.channel.name
|
||||
if hasattr(interaction.channel, "guild") and interaction.channel.guild:
|
||||
chat_name = f"{interaction.channel.guild.name} / #{chat_name}"
|
||||
|
||||
|
||||
# Get channel topic (if available)
|
||||
chat_topic = getattr(interaction.channel, "topic", None)
|
||||
|
||||
@@ -715,9 +715,7 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
raw_message=interaction,
|
||||
)
|
||||
|
||||
async def send_exec_approval(
|
||||
self, chat_id: str, command: str, approval_id: str
|
||||
) -> SendResult:
|
||||
async def send_exec_approval(self, chat_id: str, command: str, approval_id: str) -> SendResult:
|
||||
"""
|
||||
Send a button-based exec approval prompt for a dangerous command.
|
||||
|
||||
@@ -759,28 +757,28 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
# bot responds to every message without needing a mention.
|
||||
# DISCORD_REQUIRE_MENTION: Set to "false" to disable mention requirement
|
||||
# globally (all channels become free-response). Default: "true".
|
||||
|
||||
|
||||
if not isinstance(message.channel, discord.DMChannel):
|
||||
# Check if this channel is in the free-response list
|
||||
free_channels_raw = os.getenv("DISCORD_FREE_RESPONSE_CHANNELS", "")
|
||||
free_channels = {ch.strip() for ch in free_channels_raw.split(",") if ch.strip()}
|
||||
channel_id = str(message.channel.id)
|
||||
|
||||
|
||||
# Global override: if DISCORD_REQUIRE_MENTION=false, all channels are free
|
||||
require_mention = os.getenv("DISCORD_REQUIRE_MENTION", "true").lower() not in ("false", "0", "no")
|
||||
|
||||
|
||||
is_free_channel = channel_id in free_channels
|
||||
|
||||
|
||||
if require_mention and not is_free_channel:
|
||||
# Must be @mentioned to respond
|
||||
if self._client.user not in message.mentions:
|
||||
return # Silently ignore messages that don't mention the bot
|
||||
|
||||
|
||||
# Strip the bot mention from the message text so the agent sees clean input
|
||||
if self._client.user and self._client.user in message.mentions:
|
||||
message.content = message.content.replace(f"<@{self._client.user.id}>", "").strip()
|
||||
message.content = message.content.replace(f"<@!{self._client.user.id}>", "").strip()
|
||||
|
||||
|
||||
# Determine message type
|
||||
msg_type = MessageType.TEXT
|
||||
if message.content.startswith("/"):
|
||||
@@ -798,7 +796,7 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
else:
|
||||
msg_type = MessageType.DOCUMENT
|
||||
break
|
||||
|
||||
|
||||
# Determine chat type
|
||||
if isinstance(message.channel, discord.DMChannel):
|
||||
chat_type = "dm"
|
||||
@@ -811,15 +809,15 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
chat_name = getattr(message.channel, "name", str(message.channel.id))
|
||||
if hasattr(message.channel, "guild") and message.channel.guild:
|
||||
chat_name = f"{message.channel.guild.name} / #{chat_name}"
|
||||
|
||||
|
||||
# Get thread ID if in a thread
|
||||
thread_id = None
|
||||
if isinstance(message.channel, discord.Thread):
|
||||
thread_id = str(message.channel.id)
|
||||
|
||||
|
||||
# Get channel topic (if available - TextChannels have topics, DMs/threads don't)
|
||||
chat_topic = getattr(message.channel, "topic", None)
|
||||
|
||||
|
||||
# Build source
|
||||
source = self.build_source(
|
||||
chat_id=str(message.channel.id),
|
||||
@@ -830,7 +828,7 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
thread_id=thread_id,
|
||||
chat_topic=chat_topic,
|
||||
)
|
||||
|
||||
|
||||
# Build media URLs -- download image attachments to local cache so the
|
||||
# vision tool can access them reliably (Discord CDN URLs can expire).
|
||||
media_urls = []
|
||||
@@ -869,7 +867,7 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
# Other attachments: keep the original URL
|
||||
media_urls.append(att.url)
|
||||
media_types.append(content_type)
|
||||
|
||||
|
||||
event = MessageEvent(
|
||||
text=message.content,
|
||||
message_type=msg_type,
|
||||
@@ -881,7 +879,7 @@ class DiscordAdapter(BasePlatformAdapter):
|
||||
reply_to_message_id=str(message.reference.message_id) if message.reference else None,
|
||||
timestamp=message.created_at,
|
||||
)
|
||||
|
||||
|
||||
await self.handle_message(event)
|
||||
|
||||
|
||||
@@ -911,20 +909,14 @@ if DISCORD_AVAILABLE:
|
||||
return True # No allowlist = anyone can approve
|
||||
return str(interaction.user.id) in self.allowed_user_ids
|
||||
|
||||
async def _resolve(
|
||||
self, interaction: discord.Interaction, action: str, color: discord.Color
|
||||
):
|
||||
async def _resolve(self, interaction: discord.Interaction, action: str, color: discord.Color):
|
||||
"""Resolve the approval and update the message."""
|
||||
if self.resolved:
|
||||
await interaction.response.send_message(
|
||||
"This approval has already been resolved~", ephemeral=True
|
||||
)
|
||||
await interaction.response.send_message("This approval has already been resolved~", ephemeral=True)
|
||||
return
|
||||
|
||||
if not self._check_auth(interaction):
|
||||
await interaction.response.send_message(
|
||||
"You're not authorized to approve commands~", ephemeral=True
|
||||
)
|
||||
await interaction.response.send_message("You're not authorized to approve commands~", ephemeral=True)
|
||||
return
|
||||
|
||||
self.resolved = True
|
||||
@@ -944,6 +936,7 @@ if DISCORD_AVAILABLE:
|
||||
# Store the approval decision
|
||||
try:
|
||||
from tools.approval import approve_permanent
|
||||
|
||||
if action == "allow_once":
|
||||
pass # One-time approval handled by gateway
|
||||
elif action == "allow_always":
|
||||
@@ -952,21 +945,15 @@ if DISCORD_AVAILABLE:
|
||||
pass
|
||||
|
||||
@discord.ui.button(label="Allow Once", style=discord.ButtonStyle.green)
|
||||
async def allow_once(
|
||||
self, interaction: discord.Interaction, button: discord.ui.Button
|
||||
):
|
||||
async def allow_once(self, interaction: discord.Interaction, button: discord.ui.Button):
|
||||
await self._resolve(interaction, "allow_once", discord.Color.green())
|
||||
|
||||
@discord.ui.button(label="Always Allow", style=discord.ButtonStyle.blurple)
|
||||
async def allow_always(
|
||||
self, interaction: discord.Interaction, button: discord.ui.Button
|
||||
):
|
||||
async def allow_always(self, interaction: discord.Interaction, button: discord.ui.Button):
|
||||
await self._resolve(interaction, "allow_always", discord.Color.blue())
|
||||
|
||||
@discord.ui.button(label="Deny", style=discord.ButtonStyle.red)
|
||||
async def deny(
|
||||
self, interaction: discord.Interaction, button: discord.ui.Button
|
||||
):
|
||||
async def deny(self, interaction: discord.Interaction, button: discord.ui.Button):
|
||||
await self._resolve(interaction, "deny", discord.Color.red())
|
||||
|
||||
async def on_timeout(self):
|
||||
|
||||
@@ -19,10 +19,11 @@ import os
|
||||
import time
|
||||
import uuid
|
||||
from datetime import datetime
|
||||
from typing import Any, Dict, List, Optional, Set
|
||||
from typing import Any
|
||||
|
||||
try:
|
||||
import aiohttp
|
||||
|
||||
AIOHTTP_AVAILABLE = True
|
||||
except ImportError:
|
||||
AIOHTTP_AVAILABLE = False
|
||||
@@ -66,10 +67,10 @@ class HomeAssistantAdapter(BasePlatformAdapter):
|
||||
super().__init__(config, Platform.HOMEASSISTANT)
|
||||
|
||||
# Connection state
|
||||
self._session: Optional["aiohttp.ClientSession"] = None
|
||||
self._ws: Optional["aiohttp.ClientWebSocketResponse"] = None
|
||||
self._rest_session: Optional["aiohttp.ClientSession"] = None
|
||||
self._listen_task: Optional[asyncio.Task] = None
|
||||
self._session: aiohttp.ClientSession | None = None
|
||||
self._ws: aiohttp.ClientWebSocketResponse | None = None
|
||||
self._rest_session: aiohttp.ClientSession | None = None
|
||||
self._listen_task: asyncio.Task | None = None
|
||||
self._msg_id: int = 0
|
||||
|
||||
# Configuration from extra
|
||||
@@ -80,13 +81,13 @@ class HomeAssistantAdapter(BasePlatformAdapter):
|
||||
self._hass_token: str = token
|
||||
|
||||
# Event filtering
|
||||
self._watch_domains: Set[str] = set(extra.get("watch_domains", []))
|
||||
self._watch_entities: Set[str] = set(extra.get("watch_entities", []))
|
||||
self._ignore_entities: Set[str] = set(extra.get("ignore_entities", []))
|
||||
self._watch_domains: set[str] = set(extra.get("watch_domains", []))
|
||||
self._watch_entities: set[str] = set(extra.get("watch_entities", []))
|
||||
self._ignore_entities: set[str] = set(extra.get("ignore_entities", []))
|
||||
self._cooldown_seconds: int = int(extra.get("cooldown_seconds", 30))
|
||||
|
||||
# Cooldown tracking: entity_id -> last_event_timestamp
|
||||
self._last_event_time: Dict[str, float] = {}
|
||||
self._last_event_time: dict[str, float] = {}
|
||||
|
||||
def _next_id(self) -> int:
|
||||
"""Return the next WebSocket message ID."""
|
||||
@@ -141,10 +142,12 @@ class HomeAssistantAdapter(BasePlatformAdapter):
|
||||
return False
|
||||
|
||||
# Step 2: Send auth
|
||||
await self._ws.send_json({
|
||||
"type": "auth",
|
||||
"access_token": self._hass_token,
|
||||
})
|
||||
await self._ws.send_json(
|
||||
{
|
||||
"type": "auth",
|
||||
"access_token": self._hass_token,
|
||||
}
|
||||
)
|
||||
|
||||
# Step 3: Wait for auth_ok
|
||||
msg = await self._ws.receive_json()
|
||||
@@ -155,11 +158,13 @@ class HomeAssistantAdapter(BasePlatformAdapter):
|
||||
|
||||
# Step 4: Subscribe to state_changed events
|
||||
sub_id = self._next_id()
|
||||
await self._ws.send_json({
|
||||
"id": sub_id,
|
||||
"type": "subscribe_events",
|
||||
"event_type": "state_changed",
|
||||
})
|
||||
await self._ws.send_json(
|
||||
{
|
||||
"id": sub_id,
|
||||
"type": "subscribe_events",
|
||||
"event_type": "state_changed",
|
||||
}
|
||||
)
|
||||
|
||||
# Verify subscription acknowledgement
|
||||
msg = await self._ws.receive_json()
|
||||
@@ -245,7 +250,7 @@ class HomeAssistantAdapter(BasePlatformAdapter):
|
||||
elif ws_msg.type in (aiohttp.WSMsgType.CLOSED, aiohttp.WSMsgType.ERROR):
|
||||
break
|
||||
|
||||
async def _handle_ha_event(self, event: Dict[str, Any]) -> None:
|
||||
async def _handle_ha_event(self, event: dict[str, Any]) -> None:
|
||||
"""Process a state_changed event from Home Assistant."""
|
||||
event_data = event.get("data", {})
|
||||
entity_id: str = event_data.get("entity_id", "")
|
||||
@@ -302,9 +307,9 @@ class HomeAssistantAdapter(BasePlatformAdapter):
|
||||
@staticmethod
|
||||
def _format_state_change(
|
||||
entity_id: str,
|
||||
old_state: Dict[str, Any],
|
||||
new_state: Dict[str, Any],
|
||||
) -> Optional[str]:
|
||||
old_state: dict[str, Any],
|
||||
new_state: dict[str, Any],
|
||||
) -> str | None:
|
||||
"""Convert a state_changed event into a human-readable description."""
|
||||
if not new_state:
|
||||
return None
|
||||
@@ -331,10 +336,7 @@ class HomeAssistantAdapter(BasePlatformAdapter):
|
||||
|
||||
if domain == "sensor":
|
||||
unit = new_state.get("attributes", {}).get("unit_of_measurement", "")
|
||||
return (
|
||||
f"[Home Assistant] {friendly_name}: changed from "
|
||||
f"{old_val}{unit} to {new_val}{unit}"
|
||||
)
|
||||
return f"[Home Assistant] {friendly_name}: changed from {old_val}{unit} to {new_val}{unit}"
|
||||
|
||||
if domain == "binary_sensor":
|
||||
return (
|
||||
@@ -344,22 +346,13 @@ class HomeAssistantAdapter(BasePlatformAdapter):
|
||||
)
|
||||
|
||||
if domain in ("light", "switch", "fan"):
|
||||
return (
|
||||
f"[Home Assistant] {friendly_name}: turned "
|
||||
f"{'on' if new_val == 'on' else 'off'}"
|
||||
)
|
||||
return f"[Home Assistant] {friendly_name}: turned {'on' if new_val == 'on' else 'off'}"
|
||||
|
||||
if domain == "alarm_control_panel":
|
||||
return (
|
||||
f"[Home Assistant] {friendly_name}: alarm state changed from "
|
||||
f"'{old_val}' to '{new_val}'"
|
||||
)
|
||||
return f"[Home Assistant] {friendly_name}: alarm state changed from '{old_val}' to '{new_val}'"
|
||||
|
||||
# Generic fallback
|
||||
return (
|
||||
f"[Home Assistant] {friendly_name} ({entity_id}): "
|
||||
f"changed from '{old_val}' to '{new_val}'"
|
||||
)
|
||||
return f"[Home Assistant] {friendly_name} ({entity_id}): changed from '{old_val}' to '{new_val}'"
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Outbound messaging
|
||||
@@ -369,8 +362,8 @@ class HomeAssistantAdapter(BasePlatformAdapter):
|
||||
self,
|
||||
chat_id: str,
|
||||
content: str,
|
||||
reply_to: Optional[str] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
reply_to: str | None = None,
|
||||
metadata: dict[str, Any] | None = None,
|
||||
) -> SendResult:
|
||||
"""Send a notification via HA REST API (persistent_notification.create).
|
||||
|
||||
@@ -384,7 +377,7 @@ class HomeAssistantAdapter(BasePlatformAdapter):
|
||||
}
|
||||
payload = {
|
||||
"title": "Hermes Agent",
|
||||
"message": content[:self.MAX_MESSAGE_LENGTH],
|
||||
"message": content[: self.MAX_MESSAGE_LENGTH],
|
||||
}
|
||||
|
||||
try:
|
||||
@@ -401,20 +394,22 @@ class HomeAssistantAdapter(BasePlatformAdapter):
|
||||
body = await resp.text()
|
||||
return SendResult(success=False, error=f"HTTP {resp.status}: {body}")
|
||||
else:
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.post(
|
||||
async with (
|
||||
aiohttp.ClientSession() as session,
|
||||
session.post(
|
||||
url,
|
||||
headers=headers,
|
||||
json=payload,
|
||||
timeout=aiohttp.ClientTimeout(total=10),
|
||||
) as resp:
|
||||
if resp.status < 300:
|
||||
return SendResult(success=True, message_id=uuid.uuid4().hex[:12])
|
||||
else:
|
||||
body = await resp.text()
|
||||
return SendResult(success=False, error=f"HTTP {resp.status}: {body}")
|
||||
) as resp,
|
||||
):
|
||||
if resp.status < 300:
|
||||
return SendResult(success=True, message_id=uuid.uuid4().hex[:12])
|
||||
else:
|
||||
body = await resp.text()
|
||||
return SendResult(success=False, error=f"HTTP {resp.status}: {body}")
|
||||
|
||||
except asyncio.TimeoutError:
|
||||
except TimeoutError:
|
||||
return SendResult(success=False, error="Timeout sending notification to HA")
|
||||
except Exception as e:
|
||||
return SendResult(success=False, error=str(e))
|
||||
@@ -423,7 +418,7 @@ class HomeAssistantAdapter(BasePlatformAdapter):
|
||||
"""No typing indicator for Home Assistant."""
|
||||
pass
|
||||
|
||||
async def get_chat_info(self, chat_id: str) -> Dict[str, Any]:
|
||||
async def get_chat_info(self, chat_id: str) -> dict[str, Any]:
|
||||
"""Return basic info about the HA event channel."""
|
||||
return {
|
||||
"name": "Home Assistant Events",
|
||||
|
||||
@@ -19,9 +19,9 @@ import os
|
||||
import random
|
||||
import re
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
from datetime import UTC, datetime
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Any
|
||||
from typing import Any
|
||||
from urllib.parse import unquote
|
||||
|
||||
import httpx
|
||||
@@ -32,9 +32,9 @@ from gateway.platforms.base import (
|
||||
MessageEvent,
|
||||
MessageType,
|
||||
SendResult,
|
||||
cache_image_from_bytes,
|
||||
cache_audio_from_bytes,
|
||||
cache_document_from_bytes,
|
||||
cache_image_from_bytes,
|
||||
cache_image_from_url,
|
||||
)
|
||||
|
||||
@@ -59,6 +59,7 @@ _PHONE_RE = re.compile(r"\+[1-9]\d{6,14}")
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _redact_phone(phone: str) -> str:
|
||||
"""Redact a phone number for logging: +15551234567 -> +155****4567."""
|
||||
if not phone:
|
||||
@@ -68,7 +69,7 @@ def _redact_phone(phone: str) -> str:
|
||||
return phone[:4] + "****" + phone[-4:]
|
||||
|
||||
|
||||
def _parse_comma_list(value: str) -> List[str]:
|
||||
def _parse_comma_list(value: str) -> list[str]:
|
||||
"""Split a comma-separated string into a list, stripping whitespace."""
|
||||
return [v.strip() for v in value.split(",") if v.strip()]
|
||||
|
||||
@@ -110,7 +111,7 @@ def _render_mentions(text: str, mentions: list) -> str:
|
||||
Signal encodes @mentions as the Unicode object replacement character
|
||||
with out-of-band metadata containing the mentioned user's UUID/number.
|
||||
"""
|
||||
if not mentions or "\uFFFC" not in text:
|
||||
if not mentions or "\ufffc" not in text:
|
||||
return text
|
||||
# Sort mentions by start position (reverse) to replace from end to start
|
||||
# so indices don't shift as we replace
|
||||
@@ -121,7 +122,7 @@ def _render_mentions(text: str, mentions: list) -> str:
|
||||
# Use the mention's number or UUID as the replacement
|
||||
identifier = mention.get("number") or mention.get("uuid") or "user"
|
||||
replacement = f"@{identifier}"
|
||||
text = text[:start] + replacement + text[start + length:]
|
||||
text = text[:start] + replacement + text[start + length :]
|
||||
return text
|
||||
|
||||
|
||||
@@ -134,6 +135,7 @@ def check_signal_requirements() -> bool:
|
||||
# Signal Adapter
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class SignalAdapter(BasePlatformAdapter):
|
||||
"""Signal messenger adapter using signal-cli HTTP daemon."""
|
||||
|
||||
@@ -152,22 +154,25 @@ class SignalAdapter(BasePlatformAdapter):
|
||||
self.group_allow_from = set(_parse_comma_list(group_allowed_str))
|
||||
|
||||
# HTTP client
|
||||
self.client: Optional[httpx.AsyncClient] = None
|
||||
self.client: httpx.AsyncClient | None = None
|
||||
|
||||
# Background tasks
|
||||
self._sse_task: Optional[asyncio.Task] = None
|
||||
self._health_monitor_task: Optional[asyncio.Task] = None
|
||||
self._typing_tasks: Dict[str, asyncio.Task] = {}
|
||||
self._sse_task: asyncio.Task | None = None
|
||||
self._health_monitor_task: asyncio.Task | None = None
|
||||
self._typing_tasks: dict[str, asyncio.Task] = {}
|
||||
self._running = False
|
||||
self._last_sse_activity = 0.0
|
||||
self._sse_response: Optional[httpx.Response] = None
|
||||
self._sse_response: httpx.Response | None = None
|
||||
|
||||
# Normalize account for self-message filtering
|
||||
self._account_normalized = self.account.strip()
|
||||
|
||||
logger.info("Signal adapter initialized: url=%s account=%s groups=%s",
|
||||
self.http_url, _redact_phone(self.account),
|
||||
"enabled" if self.group_allow_from else "disabled")
|
||||
logger.info(
|
||||
"Signal adapter initialized: url=%s account=%s groups=%s",
|
||||
self.http_url,
|
||||
_redact_phone(self.account),
|
||||
"enabled" if self.group_allow_from else "disabled",
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Lifecycle
|
||||
@@ -241,7 +246,8 @@ class SignalAdapter(BasePlatformAdapter):
|
||||
try:
|
||||
logger.debug("Signal SSE: connecting to %s", url)
|
||||
async with self.client.stream(
|
||||
"GET", url,
|
||||
"GET",
|
||||
url,
|
||||
headers={"Accept": "text/event-stream"},
|
||||
timeout=None,
|
||||
) as response:
|
||||
@@ -306,9 +312,7 @@ class SignalAdapter(BasePlatformAdapter):
|
||||
if elapsed > HEALTH_CHECK_STALE_THRESHOLD:
|
||||
logger.warning("Signal: SSE idle for %.0fs, checking daemon health", elapsed)
|
||||
try:
|
||||
resp = await self.client.get(
|
||||
f"{self.http_url}/api/v1/check", timeout=10.0
|
||||
)
|
||||
resp = await self.client.get(f"{self.http_url}/api/v1/check", timeout=10.0)
|
||||
if resp.status_code == 200:
|
||||
# Daemon is alive but SSE is idle — update activity to
|
||||
# avoid repeated warnings (connection may just be quiet)
|
||||
@@ -345,11 +349,7 @@ class SignalAdapter(BasePlatformAdapter):
|
||||
return
|
||||
|
||||
# Extract sender info
|
||||
sender = (
|
||||
envelope_data.get("sourceNumber")
|
||||
or envelope_data.get("sourceUuid")
|
||||
or envelope_data.get("source")
|
||||
)
|
||||
sender = envelope_data.get("sourceNumber") or envelope_data.get("sourceUuid") or envelope_data.get("source")
|
||||
sender_name = envelope_data.get("sourceName", "")
|
||||
sender_uuid = envelope_data.get("sourceUuid", "")
|
||||
|
||||
@@ -367,10 +367,7 @@ class SignalAdapter(BasePlatformAdapter):
|
||||
|
||||
# Get data message — also check editMessage (edited messages contain
|
||||
# their updated dataMessage inside editMessage.dataMessage)
|
||||
data_message = (
|
||||
envelope_data.get("dataMessage")
|
||||
or (envelope_data.get("editMessage") or {}).get("dataMessage")
|
||||
)
|
||||
data_message = envelope_data.get("dataMessage") or (envelope_data.get("editMessage") or {}).get("dataMessage")
|
||||
if not data_message:
|
||||
return
|
||||
|
||||
@@ -451,11 +448,11 @@ class SignalAdapter(BasePlatformAdapter):
|
||||
ts_ms = envelope_data.get("timestamp", 0)
|
||||
if ts_ms:
|
||||
try:
|
||||
timestamp = datetime.fromtimestamp(ts_ms / 1000, tz=timezone.utc)
|
||||
timestamp = datetime.fromtimestamp(ts_ms / 1000, tz=UTC)
|
||||
except (ValueError, OSError):
|
||||
timestamp = datetime.now(tz=timezone.utc)
|
||||
timestamp = datetime.now(tz=UTC)
|
||||
else:
|
||||
timestamp = datetime.now(tz=timezone.utc)
|
||||
timestamp = datetime.now(tz=UTC)
|
||||
|
||||
# Build and dispatch event
|
||||
event = MessageEvent(
|
||||
@@ -468,8 +465,7 @@ class SignalAdapter(BasePlatformAdapter):
|
||||
timestamp=timestamp,
|
||||
)
|
||||
|
||||
logger.debug("Signal: message from %s in %s: %s",
|
||||
_redact_phone(sender), chat_id[:20], (text or "")[:50])
|
||||
logger.debug("Signal: message from %s in %s: %s", _redact_phone(sender), chat_id[:20], (text or "")[:50])
|
||||
|
||||
await self.handle_message(event)
|
||||
|
||||
@@ -479,10 +475,13 @@ class SignalAdapter(BasePlatformAdapter):
|
||||
|
||||
async def _fetch_attachment(self, attachment_id: str) -> tuple:
|
||||
"""Fetch an attachment via JSON-RPC and cache it. Returns (path, ext)."""
|
||||
result = await self._rpc("getAttachment", {
|
||||
"account": self.account,
|
||||
"attachmentId": attachment_id,
|
||||
})
|
||||
result = await self._rpc(
|
||||
"getAttachment",
|
||||
{
|
||||
"account": self.account,
|
||||
"attachmentId": attachment_id,
|
||||
},
|
||||
)
|
||||
|
||||
if not result:
|
||||
return None, ""
|
||||
@@ -547,13 +546,13 @@ class SignalAdapter(BasePlatformAdapter):
|
||||
self,
|
||||
chat_id: str,
|
||||
text: str,
|
||||
reply_to_message_id: Optional[str] = None,
|
||||
reply_to_message_id: str | None = None,
|
||||
**kwargs,
|
||||
) -> SendResult:
|
||||
"""Send a text message."""
|
||||
await self._stop_typing_indicator(chat_id)
|
||||
|
||||
params: Dict[str, Any] = {
|
||||
params: dict[str, Any] = {
|
||||
"account": self.account,
|
||||
"message": text,
|
||||
}
|
||||
@@ -571,7 +570,7 @@ class SignalAdapter(BasePlatformAdapter):
|
||||
|
||||
async def send_typing(self, chat_id: str) -> None:
|
||||
"""Send a typing indicator."""
|
||||
params: Dict[str, Any] = {
|
||||
params: dict[str, Any] = {
|
||||
"account": self.account,
|
||||
}
|
||||
|
||||
@@ -586,7 +585,7 @@ class SignalAdapter(BasePlatformAdapter):
|
||||
self,
|
||||
chat_id: str,
|
||||
image_url: str,
|
||||
caption: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
**kwargs,
|
||||
) -> SendResult:
|
||||
"""Send an image. Supports http(s):// and file:// URLs."""
|
||||
@@ -611,7 +610,7 @@ class SignalAdapter(BasePlatformAdapter):
|
||||
if file_size > SIGNAL_MAX_ATTACHMENT_SIZE:
|
||||
return SendResult(success=False, error=f"Image too large ({file_size} bytes)")
|
||||
|
||||
params: Dict[str, Any] = {
|
||||
params: dict[str, Any] = {
|
||||
"account": self.account,
|
||||
"message": caption or "",
|
||||
"attachments": [file_path],
|
||||
@@ -631,8 +630,8 @@ class SignalAdapter(BasePlatformAdapter):
|
||||
self,
|
||||
chat_id: str,
|
||||
file_path: str,
|
||||
caption: Optional[str] = None,
|
||||
filename: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
filename: str | None = None,
|
||||
**kwargs,
|
||||
) -> SendResult:
|
||||
"""Send a document/file attachment."""
|
||||
@@ -641,7 +640,7 @@ class SignalAdapter(BasePlatformAdapter):
|
||||
if not Path(file_path).exists():
|
||||
return SendResult(success=False, error="File not found")
|
||||
|
||||
params: Dict[str, Any] = {
|
||||
params: dict[str, Any] = {
|
||||
"account": self.account,
|
||||
"message": caption or "",
|
||||
"attachments": [file_path],
|
||||
@@ -690,7 +689,7 @@ class SignalAdapter(BasePlatformAdapter):
|
||||
# Chat Info
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
async def get_chat_info(self, chat_id: str) -> Dict[str, Any]:
|
||||
async def get_chat_info(self, chat_id: str) -> dict[str, Any]:
|
||||
"""Get information about a chat/contact."""
|
||||
if chat_id.startswith("group:"):
|
||||
return {
|
||||
@@ -700,10 +699,13 @@ class SignalAdapter(BasePlatformAdapter):
|
||||
}
|
||||
|
||||
# Try to resolve contact name
|
||||
result = await self._rpc("getContact", {
|
||||
"account": self.account,
|
||||
"contactAddress": chat_id,
|
||||
})
|
||||
result = await self._rpc(
|
||||
"getContact",
|
||||
{
|
||||
"account": self.account,
|
||||
"contactAddress": chat_id,
|
||||
},
|
||||
)
|
||||
|
||||
name = chat_id
|
||||
if result and isinstance(result, dict):
|
||||
|
||||
@@ -10,12 +10,14 @@ Uses slack-bolt (Python) with Socket Mode for:
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
from typing import Dict, List, Optional, Any
|
||||
import re
|
||||
from typing import Any
|
||||
|
||||
try:
|
||||
from slack_bolt.async_app import AsyncApp
|
||||
from slack_bolt.adapter.socket_mode.async_handler import AsyncSocketModeHandler
|
||||
from slack_bolt.async_app import AsyncApp
|
||||
from slack_sdk.web.async_client import AsyncWebClient
|
||||
|
||||
SLACK_AVAILABLE = True
|
||||
except ImportError:
|
||||
SLACK_AVAILABLE = False
|
||||
@@ -25,16 +27,17 @@ except ImportError:
|
||||
|
||||
import sys
|
||||
from pathlib import Path as _Path
|
||||
|
||||
sys.path.insert(0, str(_Path(__file__).resolve().parents[2]))
|
||||
|
||||
from gateway.config import Platform, PlatformConfig
|
||||
from gateway.platforms.base import (
|
||||
SUPPORTED_DOCUMENT_TYPES,
|
||||
BasePlatformAdapter,
|
||||
MessageEvent,
|
||||
MessageType,
|
||||
SendResult,
|
||||
cache_image_from_url,
|
||||
cache_audio_from_url,
|
||||
cache_document_from_bytes,
|
||||
)
|
||||
|
||||
|
||||
@@ -63,9 +66,9 @@ class SlackAdapter(BasePlatformAdapter):
|
||||
|
||||
def __init__(self, config: PlatformConfig):
|
||||
super().__init__(config, Platform.SLACK)
|
||||
self._app: Optional[AsyncApp] = None
|
||||
self._handler: Optional[AsyncSocketModeHandler] = None
|
||||
self._bot_user_id: Optional[str] = None
|
||||
self._app: AsyncApp | None = None
|
||||
self._handler: AsyncSocketModeHandler | None = None
|
||||
self._bot_user_id: str | None = None
|
||||
|
||||
async def connect(self) -> bool:
|
||||
"""Connect to Slack via Socket Mode."""
|
||||
@@ -96,6 +99,13 @@ class SlackAdapter(BasePlatformAdapter):
|
||||
async def handle_message_event(event, say):
|
||||
await self._handle_slack_message(event)
|
||||
|
||||
# Acknowledge app_mention events to prevent Bolt 404 errors.
|
||||
# The "message" handler above already processes @mentions in
|
||||
# channels, so this is intentionally a no-op to avoid duplicates.
|
||||
@self._app.event("app_mention")
|
||||
async def handle_app_mention(event, say):
|
||||
pass
|
||||
|
||||
# Register slash command handler
|
||||
@self._app.command("/hermes")
|
||||
async def handle_hermes_command(ack, command):
|
||||
@@ -125,8 +135,8 @@ class SlackAdapter(BasePlatformAdapter):
|
||||
self,
|
||||
chat_id: str,
|
||||
content: str,
|
||||
reply_to: Optional[str] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
reply_to: str | None = None,
|
||||
metadata: dict[str, Any] | None = None,
|
||||
) -> SendResult:
|
||||
"""Send a message to a Slack channel or DM."""
|
||||
if not self._app:
|
||||
@@ -183,8 +193,8 @@ class SlackAdapter(BasePlatformAdapter):
|
||||
self,
|
||||
chat_id: str,
|
||||
image_path: str,
|
||||
caption: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""Send a local image file to Slack by uploading it."""
|
||||
if not self._app:
|
||||
@@ -192,6 +202,7 @@ class SlackAdapter(BasePlatformAdapter):
|
||||
|
||||
try:
|
||||
import os
|
||||
|
||||
if not os.path.exists(image_path):
|
||||
return SendResult(success=False, error=f"Image file not found: {image_path}")
|
||||
|
||||
@@ -212,8 +223,8 @@ class SlackAdapter(BasePlatformAdapter):
|
||||
self,
|
||||
chat_id: str,
|
||||
image_url: str,
|
||||
caption: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""Send an image to Slack by uploading the URL as a file."""
|
||||
if not self._app:
|
||||
@@ -237,7 +248,7 @@ class SlackAdapter(BasePlatformAdapter):
|
||||
|
||||
return SendResult(success=True, raw_response=result)
|
||||
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
# Fall back to sending the URL as text
|
||||
text = f"{caption}\n{image_url}" if caption else image_url
|
||||
return await self.send(chat_id=chat_id, content=text, reply_to=reply_to)
|
||||
@@ -246,8 +257,8 @@ class SlackAdapter(BasePlatformAdapter):
|
||||
self,
|
||||
chat_id: str,
|
||||
audio_path: str,
|
||||
caption: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""Send an audio file to Slack."""
|
||||
if not self._app:
|
||||
@@ -266,7 +277,66 @@ class SlackAdapter(BasePlatformAdapter):
|
||||
except Exception as e:
|
||||
return SendResult(success=False, error=str(e))
|
||||
|
||||
async def get_chat_info(self, chat_id: str) -> Dict[str, Any]:
|
||||
async def send_video(
|
||||
self,
|
||||
chat_id: str,
|
||||
video_path: str,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""Send a video file to Slack."""
|
||||
if not self._app:
|
||||
return SendResult(success=False, error="Not connected")
|
||||
|
||||
if not os.path.exists(video_path):
|
||||
return SendResult(success=False, error=f"Video file not found: {video_path}")
|
||||
|
||||
try:
|
||||
result = await self._app.client.files_upload_v2(
|
||||
channel=chat_id,
|
||||
file=video_path,
|
||||
filename=os.path.basename(video_path),
|
||||
initial_comment=caption or "",
|
||||
thread_ts=reply_to,
|
||||
)
|
||||
return SendResult(success=True, raw_response=result)
|
||||
|
||||
except Exception as e:
|
||||
print(f"[{self.name}] Failed to send video: {e}")
|
||||
return await super().send_video(chat_id, video_path, caption, reply_to)
|
||||
|
||||
async def send_document(
|
||||
self,
|
||||
chat_id: str,
|
||||
file_path: str,
|
||||
caption: str | None = None,
|
||||
file_name: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""Send a document/file attachment to Slack."""
|
||||
if not self._app:
|
||||
return SendResult(success=False, error="Not connected")
|
||||
|
||||
if not os.path.exists(file_path):
|
||||
return SendResult(success=False, error=f"File not found: {file_path}")
|
||||
|
||||
display_name = file_name or os.path.basename(file_path)
|
||||
|
||||
try:
|
||||
result = await self._app.client.files_upload_v2(
|
||||
channel=chat_id,
|
||||
file=file_path,
|
||||
filename=display_name,
|
||||
initial_comment=caption or "",
|
||||
thread_ts=reply_to,
|
||||
)
|
||||
return SendResult(success=True, raw_response=result)
|
||||
|
||||
except Exception as e:
|
||||
print(f"[{self.name}] Failed to send document: {e}")
|
||||
return await super().send_document(chat_id, file_path, caption, file_name, reply_to)
|
||||
|
||||
async def get_chat_info(self, chat_id: str) -> dict[str, Any]:
|
||||
"""Get information about a Slack channel."""
|
||||
if not self._app:
|
||||
return {"name": chat_id, "type": "unknown"}
|
||||
@@ -347,6 +417,56 @@ class SlackAdapter(BasePlatformAdapter):
|
||||
msg_type = MessageType.VOICE
|
||||
except Exception as e:
|
||||
print(f"[Slack] Failed to cache audio: {e}", flush=True)
|
||||
elif url:
|
||||
# Try to handle as a document attachment
|
||||
try:
|
||||
original_filename = f.get("name", "")
|
||||
ext = ""
|
||||
if original_filename:
|
||||
_, ext = os.path.splitext(original_filename)
|
||||
ext = ext.lower()
|
||||
|
||||
# Fallback: reverse-lookup from MIME type
|
||||
if not ext and mimetype:
|
||||
mime_to_ext = {v: k for k, v in SUPPORTED_DOCUMENT_TYPES.items()}
|
||||
ext = mime_to_ext.get(mimetype, "")
|
||||
|
||||
if ext not in SUPPORTED_DOCUMENT_TYPES:
|
||||
continue # Skip unsupported file types silently
|
||||
|
||||
# Check file size (Slack limit: 20 MB for bots)
|
||||
file_size = f.get("size", 0)
|
||||
MAX_DOC_BYTES = 20 * 1024 * 1024
|
||||
if not file_size or file_size > MAX_DOC_BYTES:
|
||||
print(f"[Slack] Document too large or unknown size: {file_size}", flush=True)
|
||||
continue
|
||||
|
||||
# Download and cache
|
||||
raw_bytes = await self._download_slack_file_bytes(url)
|
||||
cached_path = cache_document_from_bytes(raw_bytes, original_filename or f"document{ext}")
|
||||
doc_mime = SUPPORTED_DOCUMENT_TYPES[ext]
|
||||
media_urls.append(cached_path)
|
||||
media_types.append(doc_mime)
|
||||
msg_type = MessageType.DOCUMENT
|
||||
print(f"[Slack] Cached user document: {cached_path}", flush=True)
|
||||
|
||||
# Inject text content for .txt/.md files (capped at 100 KB)
|
||||
MAX_TEXT_INJECT_BYTES = 100 * 1024
|
||||
if ext in (".md", ".txt") and len(raw_bytes) <= MAX_TEXT_INJECT_BYTES:
|
||||
try:
|
||||
text_content = raw_bytes.decode("utf-8")
|
||||
display_name = original_filename or f"document{ext}"
|
||||
display_name = re.sub(r"[^\w.\- ]", "_", display_name)
|
||||
injection = f"[Content of {display_name}]:\n{text_content}"
|
||||
if text:
|
||||
text = f"{injection}\n\n{text}"
|
||||
else:
|
||||
text = injection
|
||||
except UnicodeDecodeError:
|
||||
pass # Binary content, skip injection
|
||||
|
||||
except Exception as e:
|
||||
print(f"[Slack] Failed to cache document: {e}", flush=True)
|
||||
|
||||
# Build source
|
||||
source = self.build_source(
|
||||
@@ -378,16 +498,20 @@ class SlackAdapter(BasePlatformAdapter):
|
||||
|
||||
# Map subcommands to gateway commands
|
||||
subcommand_map = {
|
||||
"new": "/reset", "reset": "/reset",
|
||||
"status": "/status", "stop": "/stop",
|
||||
"new": "/reset",
|
||||
"reset": "/reset",
|
||||
"status": "/status",
|
||||
"stop": "/stop",
|
||||
"help": "/help",
|
||||
"model": "/model", "personality": "/personality",
|
||||
"retry": "/retry", "undo": "/undo",
|
||||
"model": "/model",
|
||||
"personality": "/personality",
|
||||
"retry": "/retry",
|
||||
"undo": "/undo",
|
||||
}
|
||||
first_word = text.split()[0] if text else ""
|
||||
if first_word in subcommand_map:
|
||||
# Preserve arguments after the subcommand
|
||||
rest = text[len(first_word):].strip()
|
||||
rest = text[len(first_word) :].strip()
|
||||
text = f"{subcommand_map[first_word]} {rest}".strip() if rest else subcommand_map[first_word]
|
||||
elif text:
|
||||
pass # Treat as a regular question
|
||||
@@ -423,7 +547,22 @@ class SlackAdapter(BasePlatformAdapter):
|
||||
|
||||
if audio:
|
||||
from gateway.platforms.base import cache_audio_from_bytes
|
||||
|
||||
return cache_audio_from_bytes(response.content, ext)
|
||||
else:
|
||||
from gateway.platforms.base import cache_image_from_bytes
|
||||
|
||||
return cache_image_from_bytes(response.content, ext)
|
||||
|
||||
async def _download_slack_file_bytes(self, url: str) -> bytes:
|
||||
"""Download a Slack file and return raw bytes."""
|
||||
import httpx
|
||||
|
||||
bot_token = self.config.token
|
||||
async with httpx.AsyncClient(timeout=30.0, follow_redirects=True) as client:
|
||||
response = await client.get(
|
||||
url,
|
||||
headers={"Authorization": f"Bearer {bot_token}"},
|
||||
)
|
||||
response.raise_for_status()
|
||||
return response.content
|
||||
|
||||
@@ -7,24 +7,26 @@ Uses python-telegram-bot library for:
|
||||
- Handling media and commands
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
from typing import Dict, List, Optional, Any
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
try:
|
||||
from telegram import Update, Bot, Message
|
||||
from telegram import Bot, Message, Update
|
||||
from telegram.constants import ChatType, ParseMode
|
||||
from telegram.ext import (
|
||||
Application,
|
||||
CommandHandler,
|
||||
MessageHandler as TelegramMessageHandler,
|
||||
ContextTypes,
|
||||
filters,
|
||||
)
|
||||
from telegram.constants import ParseMode, ChatType
|
||||
from telegram.ext import (
|
||||
MessageHandler as TelegramMessageHandler,
|
||||
)
|
||||
|
||||
TELEGRAM_AVAILABLE = True
|
||||
except ImportError:
|
||||
TELEGRAM_AVAILABLE = False
|
||||
@@ -42,22 +44,24 @@ except ImportError:
|
||||
# don't crash during class definition when the library isn't installed.
|
||||
class _MockContextTypes:
|
||||
DEFAULT_TYPE = Any
|
||||
|
||||
ContextTypes = _MockContextTypes
|
||||
|
||||
import sys
|
||||
from pathlib import Path as _Path
|
||||
|
||||
sys.path.insert(0, str(_Path(__file__).resolve().parents[2]))
|
||||
|
||||
from gateway.config import Platform, PlatformConfig
|
||||
from gateway.platforms.base import (
|
||||
SUPPORTED_DOCUMENT_TYPES,
|
||||
BasePlatformAdapter,
|
||||
MessageEvent,
|
||||
MessageType,
|
||||
SendResult,
|
||||
cache_image_from_bytes,
|
||||
cache_audio_from_bytes,
|
||||
cache_document_from_bytes,
|
||||
SUPPORTED_DOCUMENT_TYPES,
|
||||
cache_image_from_bytes,
|
||||
)
|
||||
|
||||
|
||||
@@ -68,12 +72,12 @@ def check_telegram_requirements() -> bool:
|
||||
|
||||
# Matches every character that MarkdownV2 requires to be backslash-escaped
|
||||
# when it appears outside a code span or fenced code block.
|
||||
_MDV2_ESCAPE_RE = re.compile(r'([_*\[\]()~`>#\+\-=|{}.!\\])')
|
||||
_MDV2_ESCAPE_RE = re.compile(r"([_*\[\]()~`>#\+\-=|{}.!\\])")
|
||||
|
||||
|
||||
def _escape_mdv2(text: str) -> str:
|
||||
"""Escape Telegram MarkdownV2 special characters with a preceding backslash."""
|
||||
return _MDV2_ESCAPE_RE.sub(r'\\\1', text)
|
||||
return _MDV2_ESCAPE_RE.sub(r"\\\1", text)
|
||||
|
||||
|
||||
def _strip_mdv2(text: str) -> str:
|
||||
@@ -83,99 +87,108 @@ def _strip_mdv2(text: str) -> str:
|
||||
doesn't show stray asterisks from header/bold conversion.
|
||||
"""
|
||||
# Remove escape backslashes before special characters
|
||||
cleaned = re.sub(r'\\([_*\[\]()~`>#\+\-=|{}.!\\])', r'\1', text)
|
||||
cleaned = re.sub(r"\\([_*\[\]()~`>#\+\-=|{}.!\\])", r"\1", text)
|
||||
# Remove MarkdownV2 bold markers that format_message converted from **bold**
|
||||
cleaned = re.sub(r'\*([^*]+)\*', r'\1', cleaned)
|
||||
cleaned = re.sub(r"\*([^*]+)\*", r"\1", cleaned)
|
||||
return cleaned
|
||||
|
||||
|
||||
class TelegramAdapter(BasePlatformAdapter):
|
||||
"""
|
||||
Telegram bot adapter.
|
||||
|
||||
|
||||
Handles:
|
||||
- Receiving messages from users and groups
|
||||
- Sending responses with Telegram markdown
|
||||
- Forum topics (thread_id support)
|
||||
- Media messages
|
||||
"""
|
||||
|
||||
|
||||
# Telegram message limits
|
||||
MAX_MESSAGE_LENGTH = 4096
|
||||
|
||||
|
||||
def __init__(self, config: PlatformConfig):
|
||||
super().__init__(config, Platform.TELEGRAM)
|
||||
self._app: Optional[Application] = None
|
||||
self._bot: Optional[Bot] = None
|
||||
|
||||
self._app: Application | None = None
|
||||
self._bot: Bot | None = None
|
||||
|
||||
async def connect(self) -> bool:
|
||||
"""Connect to Telegram and start polling for updates."""
|
||||
if not TELEGRAM_AVAILABLE:
|
||||
print(f"[{self.name}] python-telegram-bot not installed. Run: pip install python-telegram-bot")
|
||||
return False
|
||||
|
||||
|
||||
if not self.config.token:
|
||||
print(f"[{self.name}] No bot token configured")
|
||||
return False
|
||||
|
||||
|
||||
try:
|
||||
# Build the application
|
||||
self._app = Application.builder().token(self.config.token).build()
|
||||
self._bot = self._app.bot
|
||||
|
||||
|
||||
# Register handlers
|
||||
self._app.add_handler(TelegramMessageHandler(
|
||||
filters.TEXT & ~filters.COMMAND,
|
||||
self._handle_text_message
|
||||
))
|
||||
self._app.add_handler(TelegramMessageHandler(
|
||||
filters.COMMAND,
|
||||
self._handle_command
|
||||
))
|
||||
self._app.add_handler(TelegramMessageHandler(
|
||||
filters.PHOTO | filters.VIDEO | filters.AUDIO | filters.VOICE | filters.Document.ALL | filters.Sticker.ALL,
|
||||
self._handle_media_message
|
||||
))
|
||||
|
||||
self._app.add_handler(TelegramMessageHandler(filters.TEXT & ~filters.COMMAND, self._handle_text_message))
|
||||
self._app.add_handler(TelegramMessageHandler(filters.COMMAND, self._handle_command))
|
||||
self._app.add_handler(
|
||||
TelegramMessageHandler(
|
||||
filters.LOCATION | getattr(filters, "VENUE", filters.LOCATION), self._handle_location_message
|
||||
)
|
||||
)
|
||||
self._app.add_handler(
|
||||
TelegramMessageHandler(
|
||||
filters.PHOTO
|
||||
| filters.VIDEO
|
||||
| filters.AUDIO
|
||||
| filters.VOICE
|
||||
| filters.Document.ALL
|
||||
| filters.Sticker.ALL,
|
||||
self._handle_media_message,
|
||||
)
|
||||
)
|
||||
|
||||
# Start polling in background
|
||||
await self._app.initialize()
|
||||
await self._app.start()
|
||||
await self._app.updater.start_polling(allowed_updates=Update.ALL_TYPES)
|
||||
|
||||
|
||||
# Register bot commands so Telegram shows a hint menu when users type /
|
||||
try:
|
||||
from telegram import BotCommand
|
||||
await self._bot.set_my_commands([
|
||||
BotCommand("new", "Start a new conversation"),
|
||||
BotCommand("reset", "Reset conversation history"),
|
||||
BotCommand("model", "Show or change the model"),
|
||||
BotCommand("personality", "Set a personality"),
|
||||
BotCommand("retry", "Retry your last message"),
|
||||
BotCommand("undo", "Remove the last exchange"),
|
||||
BotCommand("status", "Show session info"),
|
||||
BotCommand("stop", "Stop the running agent"),
|
||||
BotCommand("sethome", "Set this chat as the home channel"),
|
||||
BotCommand("compress", "Compress conversation context"),
|
||||
BotCommand("title", "Set or show the session title"),
|
||||
BotCommand("resume", "Resume a previously-named session"),
|
||||
BotCommand("usage", "Show token usage for this session"),
|
||||
BotCommand("provider", "Show available providers"),
|
||||
BotCommand("insights", "Show usage insights and analytics"),
|
||||
BotCommand("update", "Update Hermes to the latest version"),
|
||||
BotCommand("reload_mcp", "Reload MCP servers from config"),
|
||||
BotCommand("help", "Show available commands"),
|
||||
])
|
||||
|
||||
await self._bot.set_my_commands(
|
||||
[
|
||||
BotCommand("new", "Start a new conversation"),
|
||||
BotCommand("reset", "Reset conversation history"),
|
||||
BotCommand("model", "Show or change the model"),
|
||||
BotCommand("personality", "Set a personality"),
|
||||
BotCommand("retry", "Retry your last message"),
|
||||
BotCommand("undo", "Remove the last exchange"),
|
||||
BotCommand("status", "Show session info"),
|
||||
BotCommand("stop", "Stop the running agent"),
|
||||
BotCommand("sethome", "Set this chat as the home channel"),
|
||||
BotCommand("compress", "Compress conversation context"),
|
||||
BotCommand("title", "Set or show the session title"),
|
||||
BotCommand("resume", "Resume a previously-named session"),
|
||||
BotCommand("usage", "Show token usage for this session"),
|
||||
BotCommand("provider", "Show available providers"),
|
||||
BotCommand("insights", "Show usage insights and analytics"),
|
||||
BotCommand("update", "Update Hermes to the latest version"),
|
||||
BotCommand("reload_mcp", "Reload MCP servers from config"),
|
||||
BotCommand("help", "Show available commands"),
|
||||
]
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"[{self.name}] Could not register command menu: {e}")
|
||||
|
||||
|
||||
self._running = True
|
||||
print(f"[{self.name}] Connected and polling for updates")
|
||||
return True
|
||||
|
||||
|
||||
except Exception as e:
|
||||
print(f"[{self.name}] Failed to connect: {e}")
|
||||
return False
|
||||
|
||||
|
||||
async def disconnect(self) -> None:
|
||||
"""Stop polling and disconnect."""
|
||||
if self._app:
|
||||
@@ -185,31 +198,27 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
await self._app.shutdown()
|
||||
except Exception as e:
|
||||
print(f"[{self.name}] Error during disconnect: {e}")
|
||||
|
||||
|
||||
self._running = False
|
||||
self._app = None
|
||||
self._bot = None
|
||||
print(f"[{self.name}] Disconnected")
|
||||
|
||||
|
||||
async def send(
|
||||
self,
|
||||
chat_id: str,
|
||||
content: str,
|
||||
reply_to: Optional[str] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
self, chat_id: str, content: str, reply_to: str | None = None, metadata: dict[str, Any] | None = None
|
||||
) -> SendResult:
|
||||
"""Send a message to a Telegram chat."""
|
||||
if not self._bot:
|
||||
return SendResult(success=False, error="Not connected")
|
||||
|
||||
|
||||
try:
|
||||
# Format and split message if needed
|
||||
formatted = self.format_message(content)
|
||||
chunks = self.truncate_message(formatted, self.MAX_MESSAGE_LENGTH)
|
||||
|
||||
|
||||
message_ids = []
|
||||
thread_id = metadata.get("thread_id") if metadata else None
|
||||
|
||||
|
||||
for i, chunk in enumerate(chunks):
|
||||
# Try Markdown first, fall back to plain text if it fails
|
||||
try:
|
||||
@@ -223,7 +232,9 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
except Exception as md_error:
|
||||
# Markdown parsing failed, try plain text
|
||||
if "parse" in str(md_error).lower() or "markdown" in str(md_error).lower():
|
||||
logger.warning("[%s] MarkdownV2 parse failed, falling back to plain text: %s", self.name, md_error)
|
||||
logger.warning(
|
||||
"[%s] MarkdownV2 parse failed, falling back to plain text: %s", self.name, md_error
|
||||
)
|
||||
# Strip MDV2 escape backslashes so the user doesn't
|
||||
# see raw backslashes littered through the message.
|
||||
plain_chunk = _strip_mdv2(chunk)
|
||||
@@ -237,13 +248,13 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
else:
|
||||
raise # Re-raise if not a parse error
|
||||
message_ids.append(str(msg.message_id))
|
||||
|
||||
|
||||
return SendResult(
|
||||
success=True,
|
||||
message_id=message_ids[0] if message_ids else None,
|
||||
raw_response={"message_ids": message_ids}
|
||||
raw_response={"message_ids": message_ids},
|
||||
)
|
||||
|
||||
|
||||
except Exception as e:
|
||||
return SendResult(success=False, error=str(e))
|
||||
|
||||
@@ -280,18 +291,19 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
self,
|
||||
chat_id: str,
|
||||
audio_path: str,
|
||||
caption: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""Send audio as a native Telegram voice message or audio file."""
|
||||
if not self._bot:
|
||||
return SendResult(success=False, error="Not connected")
|
||||
|
||||
|
||||
try:
|
||||
import os
|
||||
|
||||
if not os.path.exists(audio_path):
|
||||
return SendResult(success=False, error=f"Audio file not found: {audio_path}")
|
||||
|
||||
|
||||
with open(audio_path, "rb") as audio_file:
|
||||
# .ogg files -> send as voice (round playable bubble)
|
||||
if audio_path.endswith(".ogg") or audio_path.endswith(".opus"):
|
||||
@@ -313,23 +325,24 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
except Exception as e:
|
||||
print(f"[{self.name}] Failed to send voice/audio: {e}")
|
||||
return await super().send_voice(chat_id, audio_path, caption, reply_to)
|
||||
|
||||
|
||||
async def send_image_file(
|
||||
self,
|
||||
chat_id: str,
|
||||
image_path: str,
|
||||
caption: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""Send a local image file natively as a Telegram photo."""
|
||||
if not self._bot:
|
||||
return SendResult(success=False, error="Not connected")
|
||||
|
||||
|
||||
try:
|
||||
import os
|
||||
|
||||
if not os.path.exists(image_path):
|
||||
return SendResult(success=False, error=f"Image file not found: {image_path}")
|
||||
|
||||
|
||||
with open(image_path, "rb") as image_file:
|
||||
msg = await self._bot.send_photo(
|
||||
chat_id=int(chat_id),
|
||||
@@ -346,17 +359,17 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
self,
|
||||
chat_id: str,
|
||||
image_url: str,
|
||||
caption: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""Send an image natively as a Telegram photo.
|
||||
|
||||
|
||||
Tries URL-based send first (fast, works for <5MB images).
|
||||
Falls back to downloading and uploading as file (supports up to 10MB).
|
||||
"""
|
||||
if not self._bot:
|
||||
return SendResult(success=False, error="Not connected")
|
||||
|
||||
|
||||
try:
|
||||
# Telegram can send photos directly from URLs (up to ~5MB)
|
||||
msg = await self._bot.send_photo(
|
||||
@@ -371,11 +384,12 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
# Fallback: download and upload as file (supports up to 10MB)
|
||||
try:
|
||||
import httpx
|
||||
|
||||
async with httpx.AsyncClient(timeout=30.0) as client:
|
||||
resp = await client.get(image_url)
|
||||
resp.raise_for_status()
|
||||
image_data = resp.content
|
||||
|
||||
|
||||
msg = await self._bot.send_photo(
|
||||
chat_id=int(chat_id),
|
||||
photo=image_data,
|
||||
@@ -387,18 +401,18 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
logger.error("[%s] File upload send_photo also failed: %s", self.name, e2)
|
||||
# Final fallback: send URL as text
|
||||
return await super().send_image(chat_id, image_url, caption, reply_to)
|
||||
|
||||
|
||||
async def send_animation(
|
||||
self,
|
||||
chat_id: str,
|
||||
animation_url: str,
|
||||
caption: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""Send an animated GIF natively as a Telegram animation (auto-plays inline)."""
|
||||
if not self._bot:
|
||||
return SendResult(success=False, error="Not connected")
|
||||
|
||||
|
||||
try:
|
||||
msg = await self._bot.send_animation(
|
||||
chat_id=int(chat_id),
|
||||
@@ -416,21 +430,18 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
"""Send typing indicator."""
|
||||
if self._bot:
|
||||
try:
|
||||
await self._bot.send_chat_action(
|
||||
chat_id=int(chat_id),
|
||||
action="typing"
|
||||
)
|
||||
await self._bot.send_chat_action(chat_id=int(chat_id), action="typing")
|
||||
except Exception:
|
||||
pass # Ignore typing indicator failures
|
||||
|
||||
async def get_chat_info(self, chat_id: str) -> Dict[str, Any]:
|
||||
|
||||
async def get_chat_info(self, chat_id: str) -> dict[str, Any]:
|
||||
"""Get information about a Telegram chat."""
|
||||
if not self._bot:
|
||||
return {"name": "Unknown", "type": "dm"}
|
||||
|
||||
|
||||
try:
|
||||
chat = await self._bot.get_chat(int(chat_id))
|
||||
|
||||
|
||||
chat_type = "dm"
|
||||
if chat.type == ChatType.GROUP:
|
||||
chat_type = "group"
|
||||
@@ -440,7 +451,7 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
chat_type = "forum"
|
||||
elif chat.type == ChatType.CHANNEL:
|
||||
chat_type = "channel"
|
||||
|
||||
|
||||
return {
|
||||
"name": chat.title or chat.full_name or str(chat_id),
|
||||
"type": chat_type,
|
||||
@@ -449,7 +460,7 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
}
|
||||
except Exception as e:
|
||||
return {"name": str(chat_id), "type": "dm", "error": str(e)}
|
||||
|
||||
|
||||
def format_message(self, content: str) -> str:
|
||||
"""
|
||||
Convert standard markdown to Telegram MarkdownV2 format.
|
||||
@@ -476,38 +487,36 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
|
||||
# 1) Protect fenced code blocks (``` ... ```)
|
||||
text = re.sub(
|
||||
r'(```(?:[^\n]*\n)?[\s\S]*?```)',
|
||||
r"(```(?:[^\n]*\n)?[\s\S]*?```)",
|
||||
lambda m: _ph(m.group(0)),
|
||||
text,
|
||||
)
|
||||
|
||||
# 2) Protect inline code (`...`)
|
||||
text = re.sub(r'(`[^`]+`)', lambda m: _ph(m.group(0)), text)
|
||||
text = re.sub(r"(`[^`]+`)", lambda m: _ph(m.group(0)), text)
|
||||
|
||||
# 3) Convert markdown links – escape the display text; inside the URL
|
||||
# only ')' and '\' need escaping per the MarkdownV2 spec.
|
||||
def _convert_link(m):
|
||||
display = _escape_mdv2(m.group(1))
|
||||
url = m.group(2).replace('\\', '\\\\').replace(')', '\\)')
|
||||
return _ph(f'[{display}]({url})')
|
||||
url = m.group(2).replace("\\", "\\\\").replace(")", "\\)")
|
||||
return _ph(f"[{display}]({url})")
|
||||
|
||||
text = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', _convert_link, text)
|
||||
text = re.sub(r"\[([^\]]+)\]\(([^)]+)\)", _convert_link, text)
|
||||
|
||||
# 4) Convert markdown headers (## Title) → bold *Title*
|
||||
def _convert_header(m):
|
||||
inner = m.group(1).strip()
|
||||
# Strip redundant bold markers that may appear inside a header
|
||||
inner = re.sub(r'\*\*(.+?)\*\*', r'\1', inner)
|
||||
return _ph(f'*{_escape_mdv2(inner)}*')
|
||||
inner = re.sub(r"\*\*(.+?)\*\*", r"\1", inner)
|
||||
return _ph(f"*{_escape_mdv2(inner)}*")
|
||||
|
||||
text = re.sub(
|
||||
r'^#{1,6}\s+(.+)$', _convert_header, text, flags=re.MULTILINE
|
||||
)
|
||||
text = re.sub(r"^#{1,6}\s+(.+)$", _convert_header, text, flags=re.MULTILINE)
|
||||
|
||||
# 5) Convert bold: **text** → *text* (MarkdownV2 bold)
|
||||
text = re.sub(
|
||||
r'\*\*(.+?)\*\*',
|
||||
lambda m: _ph(f'*{_escape_mdv2(m.group(1))}*'),
|
||||
r"\*\*(.+?)\*\*",
|
||||
lambda m: _ph(f"*{_escape_mdv2(m.group(1))}*"),
|
||||
text,
|
||||
)
|
||||
|
||||
@@ -515,8 +524,8 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
# [^*\n]+ prevents matching across newlines (which would corrupt
|
||||
# bullet lists using * markers and multi-line content).
|
||||
text = re.sub(
|
||||
r'\*([^*\n]+)\*',
|
||||
lambda m: _ph(f'_{_escape_mdv2(m.group(1))}_'),
|
||||
r"\*([^*\n]+)\*",
|
||||
lambda m: _ph(f"_{_escape_mdv2(m.group(1))}_"),
|
||||
text,
|
||||
)
|
||||
|
||||
@@ -529,30 +538,65 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
text = text.replace(key, placeholders[key])
|
||||
|
||||
return text
|
||||
|
||||
|
||||
async def _handle_text_message(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
|
||||
"""Handle incoming text messages."""
|
||||
if not update.message or not update.message.text:
|
||||
return
|
||||
|
||||
|
||||
event = self._build_message_event(update.message, MessageType.TEXT)
|
||||
await self.handle_message(event)
|
||||
|
||||
|
||||
async def _handle_command(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
|
||||
"""Handle incoming command messages."""
|
||||
if not update.message or not update.message.text:
|
||||
return
|
||||
|
||||
|
||||
event = self._build_message_event(update.message, MessageType.COMMAND)
|
||||
await self.handle_message(event)
|
||||
|
||||
|
||||
async def _handle_location_message(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
|
||||
"""Handle incoming location/venue pin messages."""
|
||||
if not update.message:
|
||||
return
|
||||
|
||||
msg = update.message
|
||||
venue = getattr(msg, "venue", None)
|
||||
location = getattr(venue, "location", None) if venue else getattr(msg, "location", None)
|
||||
|
||||
if not location:
|
||||
return
|
||||
|
||||
lat = getattr(location, "latitude", None)
|
||||
lon = getattr(location, "longitude", None)
|
||||
if lat is None or lon is None:
|
||||
return
|
||||
|
||||
# Build a text message with coordinates and context
|
||||
parts = ["[The user shared a location pin.]"]
|
||||
if venue:
|
||||
title = getattr(venue, "title", None)
|
||||
address = getattr(venue, "address", None)
|
||||
if title:
|
||||
parts.append(f"Venue: {title}")
|
||||
if address:
|
||||
parts.append(f"Address: {address}")
|
||||
parts.append(f"latitude: {lat}")
|
||||
parts.append(f"longitude: {lon}")
|
||||
parts.append(f"Map: https://www.google.com/maps/search/?api=1&query={lat},{lon}")
|
||||
parts.append("Ask what they'd like to find nearby (restaurants, cafes, etc.) and any preferences.")
|
||||
|
||||
event = self._build_message_event(msg, MessageType.LOCATION)
|
||||
event.text = "\n".join(parts)
|
||||
await self.handle_message(event)
|
||||
|
||||
async def _handle_media_message(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
|
||||
"""Handle incoming media messages, downloading images to local cache."""
|
||||
if not update.message:
|
||||
return
|
||||
|
||||
|
||||
msg = update.message
|
||||
|
||||
|
||||
# Determine media type
|
||||
if msg.sticker:
|
||||
msg_type = MessageType.STICKER
|
||||
@@ -568,19 +612,19 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
msg_type = MessageType.DOCUMENT
|
||||
else:
|
||||
msg_type = MessageType.DOCUMENT
|
||||
|
||||
|
||||
event = self._build_message_event(msg, msg_type)
|
||||
|
||||
|
||||
# Add caption as text
|
||||
if msg.caption:
|
||||
event.text = msg.caption
|
||||
|
||||
|
||||
# Handle stickers: describe via vision tool with caching
|
||||
if msg.sticker:
|
||||
await self._handle_sticker(msg, event)
|
||||
await self.handle_message(event)
|
||||
return
|
||||
|
||||
|
||||
# Download photo to local image cache so the vision tool can access it
|
||||
# even after Telegram's ephemeral file URLs expire (~1 hour).
|
||||
if msg.photo:
|
||||
@@ -604,7 +648,7 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
print(f"[Telegram] Cached user photo: {cached_path}", flush=True)
|
||||
except Exception as e:
|
||||
print(f"[Telegram] Failed to cache photo: {e}", flush=True)
|
||||
|
||||
|
||||
# Download voice/audio messages to cache for STT transcription
|
||||
if msg.voice:
|
||||
try:
|
||||
@@ -646,10 +690,7 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
# Check if supported
|
||||
if ext not in SUPPORTED_DOCUMENT_TYPES:
|
||||
supported_list = ", ".join(sorted(SUPPORTED_DOCUMENT_TYPES.keys()))
|
||||
event.text = (
|
||||
f"Unsupported document type '{ext or 'unknown'}'. "
|
||||
f"Supported types: {supported_list}"
|
||||
)
|
||||
event.text = f"Unsupported document type '{ext or 'unknown'}'. Supported types: {supported_list}"
|
||||
print(f"[Telegram] Unsupported document type: {ext or 'unknown'}", flush=True)
|
||||
await self.handle_message(event)
|
||||
return
|
||||
@@ -657,10 +698,7 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
# Check file size (Telegram Bot API limit: 20 MB)
|
||||
MAX_DOC_BYTES = 20 * 1024 * 1024
|
||||
if not doc.file_size or doc.file_size > MAX_DOC_BYTES:
|
||||
event.text = (
|
||||
"The document is too large or its size could not be verified. "
|
||||
"Maximum: 20 MB."
|
||||
)
|
||||
event.text = "The document is too large or its size could not be verified. Maximum: 20 MB."
|
||||
print(f"[Telegram] Document too large: {doc.file_size} bytes", flush=True)
|
||||
await self.handle_message(event)
|
||||
return
|
||||
@@ -681,20 +719,20 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
try:
|
||||
text_content = raw_bytes.decode("utf-8")
|
||||
display_name = original_filename or f"document{ext}"
|
||||
display_name = re.sub(r'[^\w.\- ]', '_', display_name)
|
||||
display_name = re.sub(r"[^\w.\- ]", "_", display_name)
|
||||
injection = f"[Content of {display_name}]:\n{text_content}"
|
||||
if event.text:
|
||||
event.text = f"{injection}\n\n{event.text}"
|
||||
else:
|
||||
event.text = injection
|
||||
except UnicodeDecodeError:
|
||||
print(f"[Telegram] Could not decode text file as UTF-8, skipping content injection", flush=True)
|
||||
print("[Telegram] Could not decode text file as UTF-8, skipping content injection", flush=True)
|
||||
|
||||
except Exception as e:
|
||||
print(f"[Telegram] Failed to cache document: {e}", flush=True)
|
||||
|
||||
await self.handle_message(event)
|
||||
|
||||
|
||||
async def _handle_sticker(self, msg: Message, event: "MessageEvent") -> None:
|
||||
"""
|
||||
Describe a Telegram sticker via vision analysis, with caching.
|
||||
@@ -704,11 +742,11 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
a placeholder noting the emoji.
|
||||
"""
|
||||
from gateway.sticker_cache import (
|
||||
get_cached_description,
|
||||
cache_sticker_description,
|
||||
build_sticker_injection,
|
||||
build_animated_sticker_injection,
|
||||
STICKER_VISION_PROMPT,
|
||||
build_animated_sticker_injection,
|
||||
build_sticker_injection,
|
||||
cache_sticker_description,
|
||||
get_cached_description,
|
||||
)
|
||||
|
||||
sticker = msg.sticker
|
||||
@@ -736,9 +774,10 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
cached_path = cache_image_from_bytes(bytes(image_bytes), ext=".webp")
|
||||
print(f"[Telegram] Analyzing sticker: {cached_path}", flush=True)
|
||||
|
||||
from tools.vision_tools import vision_analyze_tool
|
||||
import json as _json
|
||||
|
||||
from tools.vision_tools import vision_analyze_tool
|
||||
|
||||
result_json = await vision_analyze_tool(
|
||||
image_url=cached_path,
|
||||
user_prompt=STICKER_VISION_PROMPT,
|
||||
@@ -753,27 +792,29 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
# Vision failed -- use emoji as fallback
|
||||
event.text = build_sticker_injection(
|
||||
f"a sticker with emoji {emoji}" if emoji else "a sticker",
|
||||
emoji, set_name,
|
||||
emoji,
|
||||
set_name,
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"[Telegram] Sticker analysis error: {e}", flush=True)
|
||||
event.text = build_sticker_injection(
|
||||
f"a sticker with emoji {emoji}" if emoji else "a sticker",
|
||||
emoji, set_name,
|
||||
emoji,
|
||||
set_name,
|
||||
)
|
||||
|
||||
def _build_message_event(self, message: Message, msg_type: MessageType) -> MessageEvent:
|
||||
"""Build a MessageEvent from a Telegram message."""
|
||||
chat = message.chat
|
||||
user = message.from_user
|
||||
|
||||
|
||||
# Determine chat type
|
||||
chat_type = "dm"
|
||||
if chat.type in (ChatType.GROUP, ChatType.SUPERGROUP):
|
||||
chat_type = "group"
|
||||
elif chat.type == ChatType.CHANNEL:
|
||||
chat_type = "channel"
|
||||
|
||||
|
||||
# Build source
|
||||
source = self.build_source(
|
||||
chat_id=str(chat.id),
|
||||
@@ -783,7 +824,7 @@ class TelegramAdapter(BasePlatformAdapter):
|
||||
user_name=user.full_name if user else None,
|
||||
thread_id=str(message.message_thread_id) if message.message_thread_id else None,
|
||||
)
|
||||
|
||||
|
||||
return MessageEvent(
|
||||
text=message.text or "",
|
||||
message_type=msg_type,
|
||||
|
||||
@@ -16,7 +16,6 @@ with different backends via a bridge pattern.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import platform
|
||||
@@ -24,7 +23,7 @@ import subprocess
|
||||
|
||||
_IS_WINDOWS = platform.system() == "Windows"
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Any
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -36,7 +35,9 @@ def _kill_port_process(port: int) -> None:
|
||||
# Use netstat to find the PID bound to this port, then taskkill
|
||||
result = subprocess.run(
|
||||
["netstat", "-ano", "-p", "TCP"],
|
||||
capture_output=True, text=True, timeout=5,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=5,
|
||||
)
|
||||
for line in result.stdout.splitlines():
|
||||
parts = line.split()
|
||||
@@ -46,24 +47,29 @@ def _kill_port_process(port: int) -> None:
|
||||
try:
|
||||
subprocess.run(
|
||||
["taskkill", "/PID", parts[4], "/F"],
|
||||
capture_output=True, timeout=5,
|
||||
capture_output=True,
|
||||
timeout=5,
|
||||
)
|
||||
except subprocess.SubprocessError:
|
||||
pass
|
||||
else:
|
||||
result = subprocess.run(
|
||||
["fuser", f"{port}/tcp"],
|
||||
capture_output=True, timeout=5,
|
||||
capture_output=True,
|
||||
timeout=5,
|
||||
)
|
||||
if result.returncode == 0:
|
||||
subprocess.run(
|
||||
["fuser", "-k", f"{port}/tcp"],
|
||||
capture_output=True, timeout=5,
|
||||
capture_output=True,
|
||||
timeout=5,
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
import sys
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).resolve().parents[2]))
|
||||
|
||||
from gateway.config import Platform, PlatformConfig
|
||||
@@ -72,25 +78,20 @@ from gateway.platforms.base import (
|
||||
MessageEvent,
|
||||
MessageType,
|
||||
SendResult,
|
||||
cache_image_from_url,
|
||||
cache_audio_from_url,
|
||||
cache_image_from_url,
|
||||
)
|
||||
|
||||
|
||||
def check_whatsapp_requirements() -> bool:
|
||||
"""
|
||||
Check if WhatsApp dependencies are available.
|
||||
|
||||
|
||||
WhatsApp requires a Node.js bridge for most implementations.
|
||||
"""
|
||||
# Check for Node.js
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["node", "--version"],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=5
|
||||
)
|
||||
result = subprocess.run(["node", "--version"], capture_output=True, text=True, timeout=5)
|
||||
return result.returncode == 0
|
||||
except Exception:
|
||||
return False
|
||||
@@ -99,62 +100,61 @@ def check_whatsapp_requirements() -> bool:
|
||||
class WhatsAppAdapter(BasePlatformAdapter):
|
||||
"""
|
||||
WhatsApp adapter.
|
||||
|
||||
|
||||
This implementation uses a simple HTTP bridge pattern where:
|
||||
1. A Node.js process runs the WhatsApp Web client
|
||||
2. Messages are forwarded via HTTP/IPC to this Python adapter
|
||||
3. Responses are sent back through the bridge
|
||||
|
||||
|
||||
The actual Node.js bridge implementation can vary:
|
||||
- whatsapp-web.js based
|
||||
- Baileys based
|
||||
- Business API based
|
||||
|
||||
|
||||
Configuration:
|
||||
- bridge_script: Path to the Node.js bridge script
|
||||
- bridge_port: Port for HTTP communication (default: 3000)
|
||||
- session_path: Path to store WhatsApp session data
|
||||
"""
|
||||
|
||||
|
||||
# WhatsApp message limits
|
||||
MAX_MESSAGE_LENGTH = 65536 # WhatsApp allows longer messages
|
||||
|
||||
|
||||
# Default bridge location relative to the hermes-agent install
|
||||
_DEFAULT_BRIDGE_DIR = Path(__file__).resolve().parents[2] / "scripts" / "whatsapp-bridge"
|
||||
|
||||
def __init__(self, config: PlatformConfig):
|
||||
super().__init__(config, Platform.WHATSAPP)
|
||||
self._bridge_process: Optional[subprocess.Popen] = None
|
||||
self._bridge_process: subprocess.Popen | None = None
|
||||
self._bridge_port: int = config.extra.get("bridge_port", 3000)
|
||||
self._bridge_script: Optional[str] = config.extra.get(
|
||||
self._bridge_script: str | None = config.extra.get(
|
||||
"bridge_script",
|
||||
str(self._DEFAULT_BRIDGE_DIR / "bridge.js"),
|
||||
)
|
||||
self._session_path: Path = Path(config.extra.get(
|
||||
"session_path",
|
||||
Path.home() / ".hermes" / "whatsapp" / "session"
|
||||
))
|
||||
self._session_path: Path = Path(
|
||||
config.extra.get("session_path", Path.home() / ".hermes" / "whatsapp" / "session")
|
||||
)
|
||||
self._message_queue: asyncio.Queue = asyncio.Queue()
|
||||
self._bridge_log_fh = None
|
||||
self._bridge_log: Optional[Path] = None
|
||||
|
||||
self._bridge_log: Path | None = None
|
||||
|
||||
async def connect(self) -> bool:
|
||||
"""
|
||||
Start the WhatsApp bridge.
|
||||
|
||||
|
||||
This launches the Node.js bridge process and waits for it to be ready.
|
||||
"""
|
||||
if not check_whatsapp_requirements():
|
||||
logger.warning("[%s] Node.js not found. WhatsApp requires Node.js.", self.name)
|
||||
return False
|
||||
|
||||
|
||||
bridge_path = Path(self._bridge_script)
|
||||
if not bridge_path.exists():
|
||||
logger.warning("[%s] Bridge script not found: %s", self.name, bridge_path)
|
||||
return False
|
||||
|
||||
|
||||
logger.info("[%s] Bridge found at %s", self.name, bridge_path)
|
||||
|
||||
|
||||
# Auto-install npm dependencies if node_modules doesn't exist
|
||||
bridge_dir = bridge_path.parent
|
||||
if not (bridge_dir / "node_modules").exists():
|
||||
@@ -174,16 +174,17 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
except Exception as e:
|
||||
print(f"[{self.name}] Failed to install dependencies: {e}")
|
||||
return False
|
||||
|
||||
|
||||
try:
|
||||
# Ensure session directory exists
|
||||
self._session_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
# Kill any orphaned bridge from a previous gateway run
|
||||
_kill_port_process(self._bridge_port)
|
||||
import time
|
||||
|
||||
time.sleep(1)
|
||||
|
||||
|
||||
# Start the bridge process in its own process group.
|
||||
# Route output to a log file so QR codes, errors, and reconnection
|
||||
# messages are preserved for troubleshooting.
|
||||
@@ -195,19 +196,23 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
[
|
||||
"node",
|
||||
str(bridge_path),
|
||||
"--port", str(self._bridge_port),
|
||||
"--session", str(self._session_path),
|
||||
"--mode", whatsapp_mode,
|
||||
"--port",
|
||||
str(self._bridge_port),
|
||||
"--session",
|
||||
str(self._session_path),
|
||||
"--mode",
|
||||
whatsapp_mode,
|
||||
],
|
||||
stdout=bridge_log_fh,
|
||||
stderr=bridge_log_fh,
|
||||
preexec_fn=None if _IS_WINDOWS else os.setsid,
|
||||
)
|
||||
|
||||
|
||||
# Wait for the bridge to connect to WhatsApp.
|
||||
# Phase 1: wait for the HTTP server to come up (up to 15s).
|
||||
# Phase 2: wait for WhatsApp status: connected (up to 15s more).
|
||||
import aiohttp
|
||||
|
||||
http_ready = False
|
||||
data = {}
|
||||
for attempt in range(15):
|
||||
@@ -218,17 +223,18 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
self._close_bridge_log()
|
||||
return False
|
||||
try:
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(
|
||||
f"http://localhost:{self._bridge_port}/health",
|
||||
timeout=aiohttp.ClientTimeout(total=2)
|
||||
) as resp:
|
||||
if resp.status == 200:
|
||||
http_ready = True
|
||||
data = await resp.json()
|
||||
if data.get("status") == "connected":
|
||||
print(f"[{self.name}] Bridge ready (status: connected)")
|
||||
break
|
||||
async with (
|
||||
aiohttp.ClientSession() as session,
|
||||
session.get(
|
||||
f"http://localhost:{self._bridge_port}/health", timeout=aiohttp.ClientTimeout(total=2)
|
||||
) as resp,
|
||||
):
|
||||
if resp.status == 200:
|
||||
http_ready = True
|
||||
data = await resp.json()
|
||||
if data.get("status") == "connected":
|
||||
print(f"[{self.name}] Bridge ready (status: connected)")
|
||||
break
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
@@ -237,7 +243,7 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
print(f"[{self.name}] Check log: {self._bridge_log}")
|
||||
self._close_bridge_log()
|
||||
return False
|
||||
|
||||
|
||||
# Phase 2: HTTP is up but WhatsApp may still be connecting.
|
||||
# Give it more time to authenticate with saved credentials.
|
||||
if data.get("status") != "connected":
|
||||
@@ -250,16 +256,17 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
self._close_bridge_log()
|
||||
return False
|
||||
try:
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(
|
||||
f"http://localhost:{self._bridge_port}/health",
|
||||
timeout=aiohttp.ClientTimeout(total=2)
|
||||
) as resp:
|
||||
if resp.status == 200:
|
||||
data = await resp.json()
|
||||
if data.get("status") == "connected":
|
||||
print(f"[{self.name}] Bridge ready (status: connected)")
|
||||
break
|
||||
async with (
|
||||
aiohttp.ClientSession() as session,
|
||||
session.get(
|
||||
f"http://localhost:{self._bridge_port}/health", timeout=aiohttp.ClientTimeout(total=2)
|
||||
) as resp,
|
||||
):
|
||||
if resp.status == 200:
|
||||
data = await resp.json()
|
||||
if data.get("status") == "connected":
|
||||
print(f"[{self.name}] Bridge ready (status: connected)")
|
||||
break
|
||||
except Exception:
|
||||
continue
|
||||
else:
|
||||
@@ -268,19 +275,19 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
print(f"[{self.name}] ⚠ WhatsApp not connected after 30s")
|
||||
print(f"[{self.name}] Bridge log: {self._bridge_log}")
|
||||
print(f"[{self.name}] If session expired, re-pair: hermes whatsapp")
|
||||
|
||||
|
||||
# Start message polling task
|
||||
asyncio.create_task(self._poll_messages())
|
||||
|
||||
|
||||
self._running = True
|
||||
print(f"[{self.name}] Bridge started on port {self._bridge_port}")
|
||||
return True
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.error("[%s] Failed to start bridge: %s", self.name, e, exc_info=True)
|
||||
self._close_bridge_log()
|
||||
return False
|
||||
|
||||
|
||||
def _close_bridge_log(self) -> None:
|
||||
"""Close the bridge log file handle if open."""
|
||||
if self._bridge_log_fh:
|
||||
@@ -296,6 +303,7 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
try:
|
||||
# Kill the entire process group so child node processes die too
|
||||
import signal
|
||||
|
||||
try:
|
||||
if _IS_WINDOWS:
|
||||
self._bridge_process.terminate()
|
||||
@@ -314,29 +322,25 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
self._bridge_process.kill()
|
||||
except Exception as e:
|
||||
print(f"[{self.name}] Error stopping bridge: {e}")
|
||||
|
||||
|
||||
# Also kill any orphaned bridge processes on our port
|
||||
_kill_port_process(self._bridge_port)
|
||||
|
||||
|
||||
self._running = False
|
||||
self._bridge_process = None
|
||||
self._close_bridge_log()
|
||||
print(f"[{self.name}] Disconnected")
|
||||
|
||||
|
||||
async def send(
|
||||
self,
|
||||
chat_id: str,
|
||||
content: str,
|
||||
reply_to: Optional[str] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
self, chat_id: str, content: str, reply_to: str | None = None, metadata: dict[str, Any] | None = None
|
||||
) -> SendResult:
|
||||
"""Send a message via the WhatsApp bridge."""
|
||||
if not self._running:
|
||||
return SendResult(success=False, error="Not connected")
|
||||
|
||||
|
||||
try:
|
||||
import aiohttp
|
||||
|
||||
|
||||
async with aiohttp.ClientSession() as session:
|
||||
payload = {
|
||||
"chatId": chat_id,
|
||||
@@ -344,28 +348,19 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
}
|
||||
if reply_to:
|
||||
payload["replyTo"] = reply_to
|
||||
|
||||
|
||||
async with session.post(
|
||||
f"http://localhost:{self._bridge_port}/send",
|
||||
json=payload,
|
||||
timeout=aiohttp.ClientTimeout(total=30)
|
||||
f"http://localhost:{self._bridge_port}/send", json=payload, timeout=aiohttp.ClientTimeout(total=30)
|
||||
) as resp:
|
||||
if resp.status == 200:
|
||||
data = await resp.json()
|
||||
return SendResult(
|
||||
success=True,
|
||||
message_id=data.get("messageId"),
|
||||
raw_response=data
|
||||
)
|
||||
return SendResult(success=True, message_id=data.get("messageId"), raw_response=data)
|
||||
else:
|
||||
error = await resp.text()
|
||||
return SendResult(success=False, error=error)
|
||||
|
||||
|
||||
except ImportError:
|
||||
return SendResult(
|
||||
success=False,
|
||||
error="aiohttp not installed. Run: pip install aiohttp"
|
||||
)
|
||||
return SendResult(success=False, error="aiohttp not installed. Run: pip install aiohttp")
|
||||
except Exception as e:
|
||||
return SendResult(success=False, error=str(e))
|
||||
|
||||
@@ -380,21 +375,24 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
return SendResult(success=False, error="Not connected")
|
||||
try:
|
||||
import aiohttp
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.post(
|
||||
|
||||
async with (
|
||||
aiohttp.ClientSession() as session,
|
||||
session.post(
|
||||
f"http://localhost:{self._bridge_port}/edit",
|
||||
json={
|
||||
"chatId": chat_id,
|
||||
"messageId": message_id,
|
||||
"message": content,
|
||||
},
|
||||
timeout=aiohttp.ClientTimeout(total=15)
|
||||
) as resp:
|
||||
if resp.status == 200:
|
||||
return SendResult(success=True, message_id=message_id)
|
||||
else:
|
||||
error = await resp.text()
|
||||
return SendResult(success=False, error=error)
|
||||
timeout=aiohttp.ClientTimeout(total=15),
|
||||
) as resp,
|
||||
):
|
||||
if resp.status == 200:
|
||||
return SendResult(success=True, message_id=message_id)
|
||||
else:
|
||||
error = await resp.text()
|
||||
return SendResult(success=False, error=error)
|
||||
except Exception as e:
|
||||
return SendResult(success=False, error=str(e))
|
||||
|
||||
@@ -403,8 +401,8 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
chat_id: str,
|
||||
file_path: str,
|
||||
media_type: str,
|
||||
caption: Optional[str] = None,
|
||||
file_name: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
file_name: str | None = None,
|
||||
) -> SendResult:
|
||||
"""Send any media file via bridge /send-media endpoint."""
|
||||
if not self._running:
|
||||
@@ -415,7 +413,7 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
if not os.path.exists(file_path):
|
||||
return SendResult(success=False, error=f"File not found: {file_path}")
|
||||
|
||||
payload: Dict[str, Any] = {
|
||||
payload: dict[str, Any] = {
|
||||
"chatId": chat_id,
|
||||
"filePath": file_path,
|
||||
"mediaType": media_type,
|
||||
@@ -425,22 +423,24 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
if file_name:
|
||||
payload["fileName"] = file_name
|
||||
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.post(
|
||||
async with (
|
||||
aiohttp.ClientSession() as session,
|
||||
session.post(
|
||||
f"http://localhost:{self._bridge_port}/send-media",
|
||||
json=payload,
|
||||
timeout=aiohttp.ClientTimeout(total=120),
|
||||
) as resp:
|
||||
if resp.status == 200:
|
||||
data = await resp.json()
|
||||
return SendResult(
|
||||
success=True,
|
||||
message_id=data.get("messageId"),
|
||||
raw_response=data,
|
||||
)
|
||||
else:
|
||||
error = await resp.text()
|
||||
return SendResult(success=False, error=error)
|
||||
) as resp,
|
||||
):
|
||||
if resp.status == 200:
|
||||
data = await resp.json()
|
||||
return SendResult(
|
||||
success=True,
|
||||
message_id=data.get("messageId"),
|
||||
raw_response=data,
|
||||
)
|
||||
else:
|
||||
error = await resp.text()
|
||||
return SendResult(success=False, error=error)
|
||||
|
||||
except Exception as e:
|
||||
return SendResult(success=False, error=str(e))
|
||||
@@ -449,8 +449,8 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
self,
|
||||
chat_id: str,
|
||||
image_url: str,
|
||||
caption: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""Download image URL to cache, send natively via bridge."""
|
||||
try:
|
||||
@@ -463,8 +463,8 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
self,
|
||||
chat_id: str,
|
||||
image_path: str,
|
||||
caption: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""Send a local image file natively via bridge."""
|
||||
return await self._send_media_to_bridge(chat_id, image_path, "image", caption)
|
||||
@@ -473,8 +473,8 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
self,
|
||||
chat_id: str,
|
||||
video_path: str,
|
||||
caption: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""Send a video natively via bridge — plays inline in WhatsApp."""
|
||||
return await self._send_media_to_bridge(chat_id, video_path, "video", caption)
|
||||
@@ -483,13 +483,16 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
self,
|
||||
chat_id: str,
|
||||
file_path: str,
|
||||
caption: Optional[str] = None,
|
||||
file_name: Optional[str] = None,
|
||||
reply_to: Optional[str] = None,
|
||||
caption: str | None = None,
|
||||
file_name: str | None = None,
|
||||
reply_to: str | None = None,
|
||||
) -> SendResult:
|
||||
"""Send a document/file as a downloadable attachment via bridge."""
|
||||
return await self._send_media_to_bridge(
|
||||
chat_id, file_path, "document", caption,
|
||||
chat_id,
|
||||
file_path,
|
||||
"document",
|
||||
caption,
|
||||
file_name or os.path.basename(file_path),
|
||||
)
|
||||
|
||||
@@ -497,44 +500,45 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
"""Send typing indicator via bridge."""
|
||||
if not self._running:
|
||||
return
|
||||
|
||||
|
||||
try:
|
||||
import aiohttp
|
||||
|
||||
|
||||
async with aiohttp.ClientSession() as session:
|
||||
await session.post(
|
||||
f"http://localhost:{self._bridge_port}/typing",
|
||||
json={"chatId": chat_id},
|
||||
timeout=aiohttp.ClientTimeout(total=5)
|
||||
timeout=aiohttp.ClientTimeout(total=5),
|
||||
)
|
||||
except Exception:
|
||||
pass # Ignore typing indicator failures
|
||||
|
||||
async def get_chat_info(self, chat_id: str) -> Dict[str, Any]:
|
||||
|
||||
async def get_chat_info(self, chat_id: str) -> dict[str, Any]:
|
||||
"""Get information about a WhatsApp chat."""
|
||||
if not self._running:
|
||||
return {"name": "Unknown", "type": "dm"}
|
||||
|
||||
|
||||
try:
|
||||
import aiohttp
|
||||
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(
|
||||
f"http://localhost:{self._bridge_port}/chat/{chat_id}",
|
||||
timeout=aiohttp.ClientTimeout(total=10)
|
||||
) as resp:
|
||||
if resp.status == 200:
|
||||
data = await resp.json()
|
||||
return {
|
||||
"name": data.get("name", chat_id),
|
||||
"type": "group" if data.get("isGroup") else "dm",
|
||||
"participants": data.get("participants", []),
|
||||
}
|
||||
|
||||
async with (
|
||||
aiohttp.ClientSession() as session,
|
||||
session.get(
|
||||
f"http://localhost:{self._bridge_port}/chat/{chat_id}", timeout=aiohttp.ClientTimeout(total=10)
|
||||
) as resp,
|
||||
):
|
||||
if resp.status == 200:
|
||||
data = await resp.json()
|
||||
return {
|
||||
"name": data.get("name", chat_id),
|
||||
"type": "group" if data.get("isGroup") else "dm",
|
||||
"participants": data.get("participants", []),
|
||||
}
|
||||
except Exception as e:
|
||||
logger.debug("Could not get WhatsApp chat info for %s: %s", chat_id, e)
|
||||
|
||||
|
||||
return {"name": chat_id, "type": "dm"}
|
||||
|
||||
|
||||
async def _poll_messages(self) -> None:
|
||||
"""Poll the bridge for incoming messages."""
|
||||
try:
|
||||
@@ -542,29 +546,30 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
except ImportError:
|
||||
print(f"[{self.name}] aiohttp not installed, message polling disabled")
|
||||
return
|
||||
|
||||
|
||||
while self._running:
|
||||
try:
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(
|
||||
f"http://localhost:{self._bridge_port}/messages",
|
||||
timeout=aiohttp.ClientTimeout(total=30)
|
||||
) as resp:
|
||||
if resp.status == 200:
|
||||
messages = await resp.json()
|
||||
for msg_data in messages:
|
||||
event = await self._build_message_event(msg_data)
|
||||
if event:
|
||||
await self.handle_message(event)
|
||||
async with (
|
||||
aiohttp.ClientSession() as session,
|
||||
session.get(
|
||||
f"http://localhost:{self._bridge_port}/messages", timeout=aiohttp.ClientTimeout(total=30)
|
||||
) as resp,
|
||||
):
|
||||
if resp.status == 200:
|
||||
messages = await resp.json()
|
||||
for msg_data in messages:
|
||||
event = await self._build_message_event(msg_data)
|
||||
if event:
|
||||
await self.handle_message(event)
|
||||
except asyncio.CancelledError:
|
||||
break
|
||||
except Exception as e:
|
||||
print(f"[{self.name}] Poll error: {e}")
|
||||
await asyncio.sleep(5)
|
||||
|
||||
|
||||
await asyncio.sleep(1) # Poll interval
|
||||
|
||||
async def _build_message_event(self, data: Dict[str, Any]) -> Optional[MessageEvent]:
|
||||
|
||||
async def _build_message_event(self, data: dict[str, Any]) -> MessageEvent | None:
|
||||
"""Build a MessageEvent from bridge message data, downloading images to cache."""
|
||||
try:
|
||||
# Determine message type
|
||||
@@ -579,11 +584,11 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
msg_type = MessageType.VOICE
|
||||
else:
|
||||
msg_type = MessageType.DOCUMENT
|
||||
|
||||
|
||||
# Determine chat type
|
||||
is_group = data.get("isGroup", False)
|
||||
chat_type = "group" if is_group else "dm"
|
||||
|
||||
|
||||
# Build source
|
||||
source = self.build_source(
|
||||
chat_id=data.get("chatId", ""),
|
||||
@@ -592,7 +597,7 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
user_id=data.get("senderId"),
|
||||
user_name=data.get("senderName"),
|
||||
)
|
||||
|
||||
|
||||
# Download image media URLs to the local cache so the vision tool
|
||||
# can access them reliably regardless of URL expiration.
|
||||
raw_urls = data.get("mediaUrls", [])
|
||||
@@ -622,7 +627,7 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
else:
|
||||
cached_urls.append(url)
|
||||
media_types.append("unknown")
|
||||
|
||||
|
||||
return MessageEvent(
|
||||
text=data.get("body", ""),
|
||||
message_type=msg_type,
|
||||
@@ -635,4 +640,3 @@ class WhatsAppAdapter(BasePlatformAdapter):
|
||||
except Exception as e:
|
||||
print(f"[{self.name}] Error building event: {e}")
|
||||
return None
|
||||
|
||||
|
||||
863
gateway/run.py
863
gateway/run.py
File diff suppressed because it is too large
Load Diff
@@ -8,22 +8,20 @@ Handles:
|
||||
- Dynamic system prompt injection (agent knows its context)
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import json
|
||||
import logging
|
||||
import uuid
|
||||
from pathlib import Path
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime, timedelta
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Dict, List, Optional, Any
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
from .config import (
|
||||
Platform,
|
||||
GatewayConfig,
|
||||
SessionResetPolicy,
|
||||
HomeChannel,
|
||||
Platform,
|
||||
)
|
||||
|
||||
|
||||
@@ -31,29 +29,30 @@ from .config import (
|
||||
class SessionSource:
|
||||
"""
|
||||
Describes where a message originated from.
|
||||
|
||||
|
||||
This information is used to:
|
||||
1. Route responses back to the right place
|
||||
2. Inject context into the system prompt
|
||||
3. Track origin for cron job delivery
|
||||
"""
|
||||
|
||||
platform: Platform
|
||||
chat_id: str
|
||||
chat_name: Optional[str] = None
|
||||
chat_name: str | None = None
|
||||
chat_type: str = "dm" # "dm", "group", "channel", "thread"
|
||||
user_id: Optional[str] = None
|
||||
user_name: Optional[str] = None
|
||||
thread_id: Optional[str] = None # For forum topics, Discord threads, etc.
|
||||
chat_topic: Optional[str] = None # Channel topic/description (Discord, Slack)
|
||||
user_id_alt: Optional[str] = None # Signal UUID (alternative to phone number)
|
||||
chat_id_alt: Optional[str] = None # Signal group internal ID
|
||||
|
||||
user_id: str | None = None
|
||||
user_name: str | None = None
|
||||
thread_id: str | None = None # For forum topics, Discord threads, etc.
|
||||
chat_topic: str | None = None # Channel topic/description (Discord, Slack)
|
||||
user_id_alt: str | None = None # Signal UUID (alternative to phone number)
|
||||
chat_id_alt: str | None = None # Signal group internal ID
|
||||
|
||||
@property
|
||||
def description(self) -> str:
|
||||
"""Human-readable description of the source."""
|
||||
if self.platform == Platform.LOCAL:
|
||||
return "CLI terminal"
|
||||
|
||||
|
||||
parts = []
|
||||
if self.chat_type == "dm":
|
||||
parts.append(f"DM with {self.user_name or self.user_id or 'user'}")
|
||||
@@ -63,13 +62,13 @@ class SessionSource:
|
||||
parts.append(f"channel: {self.chat_name or self.chat_id}")
|
||||
else:
|
||||
parts.append(self.chat_name or self.chat_id)
|
||||
|
||||
|
||||
if self.thread_id:
|
||||
parts.append(f"thread: {self.thread_id}")
|
||||
|
||||
|
||||
return ", ".join(parts)
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
|
||||
def to_dict(self) -> dict[str, Any]:
|
||||
d = {
|
||||
"platform": self.platform.value,
|
||||
"chat_id": self.chat_id,
|
||||
@@ -85,9 +84,9 @@ class SessionSource:
|
||||
if self.chat_id_alt:
|
||||
d["chat_id_alt"] = self.chat_id_alt
|
||||
return d
|
||||
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: Dict[str, Any]) -> "SessionSource":
|
||||
def from_dict(cls, data: dict[str, Any]) -> "SessionSource":
|
||||
return cls(
|
||||
platform=Platform(data["platform"]),
|
||||
chat_id=str(data["chat_id"]),
|
||||
@@ -100,7 +99,7 @@ class SessionSource:
|
||||
user_id_alt=data.get("user_id_alt"),
|
||||
chat_id_alt=data.get("chat_id_alt"),
|
||||
)
|
||||
|
||||
|
||||
@classmethod
|
||||
def local_cli(cls) -> "SessionSource":
|
||||
"""Create a source representing the local CLI."""
|
||||
@@ -116,29 +115,28 @@ class SessionSource:
|
||||
class SessionContext:
|
||||
"""
|
||||
Full context for a session, used for dynamic system prompt injection.
|
||||
|
||||
|
||||
The agent receives this information to understand:
|
||||
- Where messages are coming from
|
||||
- What platforms are available
|
||||
- Where it can deliver scheduled task outputs
|
||||
"""
|
||||
|
||||
source: SessionSource
|
||||
connected_platforms: List[Platform]
|
||||
home_channels: Dict[Platform, HomeChannel]
|
||||
|
||||
connected_platforms: list[Platform]
|
||||
home_channels: dict[Platform, HomeChannel]
|
||||
|
||||
# Session metadata
|
||||
session_key: str = ""
|
||||
session_id: str = ""
|
||||
created_at: Optional[datetime] = None
|
||||
updated_at: Optional[datetime] = None
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
created_at: datetime | None = None
|
||||
updated_at: datetime | None = None
|
||||
|
||||
def to_dict(self) -> dict[str, Any]:
|
||||
return {
|
||||
"source": self.source.to_dict(),
|
||||
"connected_platforms": [p.value for p in self.connected_platforms],
|
||||
"home_channels": {
|
||||
p.value: hc.to_dict() for p, hc in self.home_channels.items()
|
||||
},
|
||||
"home_channels": {p.value: hc.to_dict() for p, hc in self.home_channels.items()},
|
||||
"session_key": self.session_key,
|
||||
"session_id": self.session_id,
|
||||
"created_at": self.created_at.isoformat() if self.created_at else None,
|
||||
@@ -149,7 +147,7 @@ class SessionContext:
|
||||
def build_session_context_prompt(context: SessionContext) -> str:
|
||||
"""
|
||||
Build the dynamic system prompt section that tells the agent about its context.
|
||||
|
||||
|
||||
This is injected into the system prompt so the agent knows:
|
||||
- Where messages are coming from
|
||||
- What platforms are connected
|
||||
@@ -159,14 +157,14 @@ def build_session_context_prompt(context: SessionContext) -> str:
|
||||
"## Current Session Context",
|
||||
"",
|
||||
]
|
||||
|
||||
|
||||
# Source info
|
||||
platform_name = context.source.platform.value.title()
|
||||
if context.source.platform == Platform.LOCAL:
|
||||
lines.append(f"**Source:** {platform_name} (the machine running this agent)")
|
||||
else:
|
||||
lines.append(f"**Source:** {platform_name} ({context.source.description})")
|
||||
|
||||
|
||||
# Channel topic (if available - provides context about the channel's purpose)
|
||||
if context.source.chat_topic:
|
||||
lines.append(f"**Channel Topic:** {context.source.chat_topic}")
|
||||
@@ -176,43 +174,43 @@ def build_session_context_prompt(context: SessionContext) -> str:
|
||||
lines.append(f"**User:** {context.source.user_name}")
|
||||
elif context.source.user_id:
|
||||
lines.append(f"**User ID:** {context.source.user_id}")
|
||||
|
||||
|
||||
# Connected platforms
|
||||
platforms_list = ["local (files on this machine)"]
|
||||
for p in context.connected_platforms:
|
||||
if p != Platform.LOCAL:
|
||||
platforms_list.append(f"{p.value}: Connected ✓")
|
||||
|
||||
|
||||
lines.append(f"**Connected Platforms:** {', '.join(platforms_list)}")
|
||||
|
||||
|
||||
# Home channels
|
||||
if context.home_channels:
|
||||
lines.append("")
|
||||
lines.append("**Home Channels (default destinations):**")
|
||||
for platform, home in context.home_channels.items():
|
||||
lines.append(f" - {platform.value}: {home.name} (ID: {home.chat_id})")
|
||||
|
||||
|
||||
# Delivery options for scheduled tasks
|
||||
lines.append("")
|
||||
lines.append("**Delivery options for scheduled tasks:**")
|
||||
|
||||
|
||||
# Origin delivery
|
||||
if context.source.platform == Platform.LOCAL:
|
||||
lines.append("- `\"origin\"` → Local output (saved to files)")
|
||||
lines.append('- `"origin"` → Local output (saved to files)')
|
||||
else:
|
||||
lines.append(f"- `\"origin\"` → Back to this chat ({context.source.chat_name or context.source.chat_id})")
|
||||
|
||||
lines.append(f'- `"origin"` → Back to this chat ({context.source.chat_name or context.source.chat_id})')
|
||||
|
||||
# Local always available
|
||||
lines.append("- `\"local\"` → Save to local files only (~/.hermes/cron/output/)")
|
||||
|
||||
lines.append('- `"local"` → Save to local files only (~/.hermes/cron/output/)')
|
||||
|
||||
# Platform home channels
|
||||
for platform, home in context.home_channels.items():
|
||||
lines.append(f"- `\"{platform.value}\"` → Home channel ({home.name})")
|
||||
|
||||
lines.append(f'- `"{platform.value}"` → Home channel ({home.name})')
|
||||
|
||||
# Note about explicit targeting
|
||||
lines.append("")
|
||||
lines.append("*For explicit targeting, use `\"platform:chat_id\"` format if the user provides a specific chat ID.*")
|
||||
|
||||
lines.append('*For explicit targeting, use `"platform:chat_id"` format if the user provides a specific chat ID.*')
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
@@ -220,32 +218,33 @@ def build_session_context_prompt(context: SessionContext) -> str:
|
||||
class SessionEntry:
|
||||
"""
|
||||
Entry in the session store.
|
||||
|
||||
|
||||
Maps a session key to its current session ID and metadata.
|
||||
"""
|
||||
|
||||
session_key: str
|
||||
session_id: str
|
||||
created_at: datetime
|
||||
updated_at: datetime
|
||||
|
||||
|
||||
# Origin metadata for delivery routing
|
||||
origin: Optional[SessionSource] = None
|
||||
|
||||
origin: SessionSource | None = None
|
||||
|
||||
# Display metadata
|
||||
display_name: Optional[str] = None
|
||||
platform: Optional[Platform] = None
|
||||
display_name: str | None = None
|
||||
platform: Platform | None = None
|
||||
chat_type: str = "dm"
|
||||
|
||||
|
||||
# Token tracking
|
||||
input_tokens: int = 0
|
||||
output_tokens: int = 0
|
||||
total_tokens: int = 0
|
||||
|
||||
|
||||
# Set when a session was created because the previous one expired;
|
||||
# consumed once by the message handler to inject a notice into context
|
||||
was_auto_reset: bool = False
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
|
||||
def to_dict(self) -> dict[str, Any]:
|
||||
result = {
|
||||
"session_key": self.session_key,
|
||||
"session_id": self.session_id,
|
||||
@@ -261,20 +260,20 @@ class SessionEntry:
|
||||
if self.origin:
|
||||
result["origin"] = self.origin.to_dict()
|
||||
return result
|
||||
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: Dict[str, Any]) -> "SessionEntry":
|
||||
def from_dict(cls, data: dict[str, Any]) -> "SessionEntry":
|
||||
origin = None
|
||||
if "origin" in data and data["origin"]:
|
||||
origin = SessionSource.from_dict(data["origin"])
|
||||
|
||||
|
||||
platform = None
|
||||
if data.get("platform"):
|
||||
try:
|
||||
platform = Platform(data["platform"])
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
|
||||
return cls(
|
||||
session_key=data["session_key"],
|
||||
session_id=data["session_id"],
|
||||
@@ -307,66 +306,65 @@ def build_session_key(source: SessionSource) -> str:
|
||||
class SessionStore:
|
||||
"""
|
||||
Manages session storage and retrieval.
|
||||
|
||||
|
||||
Uses SQLite (via SessionDB) for session metadata and message transcripts.
|
||||
Falls back to legacy JSONL files if SQLite is unavailable.
|
||||
"""
|
||||
|
||||
def __init__(self, sessions_dir: Path, config: GatewayConfig,
|
||||
has_active_processes_fn=None,
|
||||
on_auto_reset=None):
|
||||
|
||||
def __init__(self, sessions_dir: Path, config: GatewayConfig, has_active_processes_fn=None, on_auto_reset=None):
|
||||
self.sessions_dir = sessions_dir
|
||||
self.config = config
|
||||
self._entries: Dict[str, SessionEntry] = {}
|
||||
self._entries: dict[str, SessionEntry] = {}
|
||||
self._loaded = False
|
||||
self._has_active_processes_fn = has_active_processes_fn
|
||||
# on_auto_reset is deprecated — memory flush now runs proactively
|
||||
# via the background session expiry watcher in GatewayRunner.
|
||||
self._pre_flushed_sessions: set = set() # session_ids already flushed by watcher
|
||||
|
||||
|
||||
# Initialize SQLite session database
|
||||
self._db = None
|
||||
try:
|
||||
from hermes_state import SessionDB
|
||||
|
||||
self._db = SessionDB()
|
||||
except Exception as e:
|
||||
print(f"[gateway] Warning: SQLite session store unavailable, falling back to JSONL: {e}")
|
||||
|
||||
|
||||
def _ensure_loaded(self) -> None:
|
||||
"""Load sessions index from disk if not already loaded."""
|
||||
if self._loaded:
|
||||
return
|
||||
|
||||
|
||||
self.sessions_dir.mkdir(parents=True, exist_ok=True)
|
||||
sessions_file = self.sessions_dir / "sessions.json"
|
||||
|
||||
|
||||
if sessions_file.exists():
|
||||
try:
|
||||
with open(sessions_file, "r", encoding="utf-8") as f:
|
||||
with open(sessions_file, encoding="utf-8") as f:
|
||||
data = json.load(f)
|
||||
for key, entry_data in data.items():
|
||||
self._entries[key] = SessionEntry.from_dict(entry_data)
|
||||
except Exception as e:
|
||||
print(f"[gateway] Warning: Failed to load sessions: {e}")
|
||||
|
||||
|
||||
self._loaded = True
|
||||
|
||||
|
||||
def _save(self) -> None:
|
||||
"""Save sessions index to disk (kept for session key -> ID mapping)."""
|
||||
self.sessions_dir.mkdir(parents=True, exist_ok=True)
|
||||
sessions_file = self.sessions_dir / "sessions.json"
|
||||
|
||||
|
||||
data = {key: entry.to_dict() for key, entry in self._entries.items()}
|
||||
with open(sessions_file, "w", encoding="utf-8") as f:
|
||||
json.dump(data, f, indent=2)
|
||||
|
||||
|
||||
def _generate_session_key(self, source: SessionSource) -> str:
|
||||
"""Generate a session key from a source."""
|
||||
return build_session_key(source)
|
||||
|
||||
|
||||
def _is_session_expired(self, entry: SessionEntry) -> bool:
|
||||
"""Check if a session has expired based on its reset policy.
|
||||
|
||||
|
||||
Works from the entry alone — no SessionSource needed.
|
||||
Used by the background expiry watcher to proactively flush memories.
|
||||
Sessions with active background processes are never considered expired.
|
||||
@@ -393,7 +391,9 @@ class SessionStore:
|
||||
if policy.mode in ("daily", "both"):
|
||||
today_reset = now.replace(
|
||||
hour=policy.at_hour,
|
||||
minute=0, second=0, microsecond=0,
|
||||
minute=0,
|
||||
second=0,
|
||||
microsecond=0,
|
||||
)
|
||||
if now.hour < policy.at_hour:
|
||||
today_reset -= timedelta(days=1)
|
||||
@@ -405,7 +405,7 @@ class SessionStore:
|
||||
def _should_reset(self, entry: SessionEntry, source: SessionSource) -> bool:
|
||||
"""
|
||||
Check if a session should be reset based on policy.
|
||||
|
||||
|
||||
Sessions with active background processes are never reset.
|
||||
"""
|
||||
if self._has_active_processes_fn:
|
||||
@@ -413,36 +413,28 @@ class SessionStore:
|
||||
if self._has_active_processes_fn(session_key):
|
||||
return False
|
||||
|
||||
policy = self.config.get_reset_policy(
|
||||
platform=source.platform,
|
||||
session_type=source.chat_type
|
||||
)
|
||||
|
||||
policy = self.config.get_reset_policy(platform=source.platform, session_type=source.chat_type)
|
||||
|
||||
if policy.mode == "none":
|
||||
return False
|
||||
|
||||
|
||||
now = datetime.now()
|
||||
|
||||
|
||||
if policy.mode in ("idle", "both"):
|
||||
idle_deadline = entry.updated_at + timedelta(minutes=policy.idle_minutes)
|
||||
if now > idle_deadline:
|
||||
return True
|
||||
|
||||
|
||||
if policy.mode in ("daily", "both"):
|
||||
today_reset = now.replace(
|
||||
hour=policy.at_hour,
|
||||
minute=0,
|
||||
second=0,
|
||||
microsecond=0
|
||||
)
|
||||
today_reset = now.replace(hour=policy.at_hour, minute=0, second=0, microsecond=0)
|
||||
if now.hour < policy.at_hour:
|
||||
today_reset -= timedelta(days=1)
|
||||
|
||||
|
||||
if entry.updated_at < today_reset:
|
||||
return True
|
||||
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def has_any_sessions(self) -> bool:
|
||||
"""Check if any sessions have ever been created (across all platforms).
|
||||
|
||||
@@ -463,26 +455,22 @@ class SessionStore:
|
||||
# This covers the rare case where the DB is unavailable.
|
||||
self._ensure_loaded()
|
||||
return len(self._entries) > 1
|
||||
|
||||
def get_or_create_session(
|
||||
self,
|
||||
source: SessionSource,
|
||||
force_new: bool = False
|
||||
) -> SessionEntry:
|
||||
|
||||
def get_or_create_session(self, source: SessionSource, force_new: bool = False) -> SessionEntry:
|
||||
"""
|
||||
Get an existing session or create a new one.
|
||||
|
||||
|
||||
Evaluates reset policy to determine if the existing session is stale.
|
||||
Creates a session record in SQLite when a new session starts.
|
||||
"""
|
||||
self._ensure_loaded()
|
||||
|
||||
|
||||
session_key = self._generate_session_key(source)
|
||||
now = datetime.now()
|
||||
|
||||
|
||||
if session_key in self._entries and not force_new:
|
||||
entry = self._entries[session_key]
|
||||
|
||||
|
||||
if not self._should_reset(entry, source):
|
||||
entry.updated_at = now
|
||||
self._save()
|
||||
@@ -500,10 +488,10 @@ class SessionStore:
|
||||
logger.debug("Session DB operation failed: %s", e)
|
||||
else:
|
||||
was_auto_reset = False
|
||||
|
||||
|
||||
# Create new session
|
||||
session_id = f"{now.strftime('%Y%m%d_%H%M%S')}_{uuid.uuid4().hex[:8]}"
|
||||
|
||||
|
||||
entry = SessionEntry(
|
||||
session_key=session_key,
|
||||
session_id=session_id,
|
||||
@@ -515,10 +503,10 @@ class SessionStore:
|
||||
chat_type=source.chat_type,
|
||||
was_auto_reset=was_auto_reset,
|
||||
)
|
||||
|
||||
|
||||
self._entries[session_key] = entry
|
||||
self._save()
|
||||
|
||||
|
||||
# Create session in SQLite
|
||||
if self._db:
|
||||
try:
|
||||
@@ -529,18 +517,13 @@ class SessionStore:
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"[gateway] Warning: Failed to create SQLite session: {e}")
|
||||
|
||||
|
||||
return entry
|
||||
|
||||
def update_session(
|
||||
self,
|
||||
session_key: str,
|
||||
input_tokens: int = 0,
|
||||
output_tokens: int = 0
|
||||
) -> None:
|
||||
|
||||
def update_session(self, session_key: str, input_tokens: int = 0, output_tokens: int = 0) -> None:
|
||||
"""Update a session's metadata after an interaction."""
|
||||
self._ensure_loaded()
|
||||
|
||||
|
||||
if session_key in self._entries:
|
||||
entry = self._entries[session_key]
|
||||
entry.updated_at = datetime.now()
|
||||
@@ -548,34 +531,32 @@ class SessionStore:
|
||||
entry.output_tokens += output_tokens
|
||||
entry.total_tokens = entry.input_tokens + entry.output_tokens
|
||||
self._save()
|
||||
|
||||
|
||||
if self._db:
|
||||
try:
|
||||
self._db.update_token_counts(
|
||||
entry.session_id, input_tokens, output_tokens
|
||||
)
|
||||
self._db.update_token_counts(entry.session_id, input_tokens, output_tokens)
|
||||
except Exception as e:
|
||||
logger.debug("Session DB operation failed: %s", e)
|
||||
|
||||
def reset_session(self, session_key: str) -> Optional[SessionEntry]:
|
||||
|
||||
def reset_session(self, session_key: str) -> SessionEntry | None:
|
||||
"""Force reset a session, creating a new session ID."""
|
||||
self._ensure_loaded()
|
||||
|
||||
|
||||
if session_key not in self._entries:
|
||||
return None
|
||||
|
||||
|
||||
old_entry = self._entries[session_key]
|
||||
|
||||
|
||||
# End old session in SQLite
|
||||
if self._db:
|
||||
try:
|
||||
self._db.end_session(old_entry.session_id, "session_reset")
|
||||
except Exception as e:
|
||||
logger.debug("Session DB operation failed: %s", e)
|
||||
|
||||
|
||||
now = datetime.now()
|
||||
session_id = f"{now.strftime('%Y%m%d_%H%M%S')}_{uuid.uuid4().hex[:8]}"
|
||||
|
||||
|
||||
new_entry = SessionEntry(
|
||||
session_key=session_key,
|
||||
session_id=session_id,
|
||||
@@ -586,10 +567,10 @@ class SessionStore:
|
||||
platform=old_entry.platform,
|
||||
chat_type=old_entry.chat_type,
|
||||
)
|
||||
|
||||
|
||||
self._entries[session_key] = new_entry
|
||||
self._save()
|
||||
|
||||
|
||||
# Create new session in SQLite
|
||||
if self._db:
|
||||
try:
|
||||
@@ -600,10 +581,10 @@ class SessionStore:
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug("Session DB operation failed: %s", e)
|
||||
|
||||
|
||||
return new_entry
|
||||
|
||||
def switch_session(self, session_key: str, target_session_id: str) -> Optional[SessionEntry]:
|
||||
def switch_session(self, session_key: str, target_session_id: str) -> SessionEntry | None:
|
||||
"""Switch a session key to point at an existing session ID.
|
||||
|
||||
Used by ``/resume`` to restore a previously-named session.
|
||||
@@ -645,25 +626,25 @@ class SessionStore:
|
||||
self._save()
|
||||
return new_entry
|
||||
|
||||
def list_sessions(self, active_minutes: Optional[int] = None) -> List[SessionEntry]:
|
||||
def list_sessions(self, active_minutes: int | None = None) -> list[SessionEntry]:
|
||||
"""List all sessions, optionally filtered by activity."""
|
||||
self._ensure_loaded()
|
||||
|
||||
|
||||
entries = list(self._entries.values())
|
||||
|
||||
|
||||
if active_minutes is not None:
|
||||
cutoff = datetime.now() - timedelta(minutes=active_minutes)
|
||||
entries = [e for e in entries if e.updated_at >= cutoff]
|
||||
|
||||
|
||||
entries.sort(key=lambda e: e.updated_at, reverse=True)
|
||||
|
||||
|
||||
return entries
|
||||
|
||||
|
||||
def get_transcript_path(self, session_id: str) -> Path:
|
||||
"""Get the path to a session's legacy transcript file."""
|
||||
return self.sessions_dir / f"{session_id}.jsonl"
|
||||
|
||||
def append_to_transcript(self, session_id: str, message: Dict[str, Any]) -> None:
|
||||
|
||||
def append_to_transcript(self, session_id: str, message: dict[str, Any]) -> None:
|
||||
"""Append a message to a session's transcript (SQLite + legacy JSONL)."""
|
||||
# Write to SQLite
|
||||
if self._db:
|
||||
@@ -678,15 +659,15 @@ class SessionStore:
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug("Session DB operation failed: %s", e)
|
||||
|
||||
|
||||
# Also write legacy JSONL (keeps existing tooling working during transition)
|
||||
transcript_path = self.get_transcript_path(session_id)
|
||||
with open(transcript_path, "a", encoding="utf-8") as f:
|
||||
f.write(json.dumps(message, ensure_ascii=False) + "\n")
|
||||
|
||||
def rewrite_transcript(self, session_id: str, messages: List[Dict[str, Any]]) -> None:
|
||||
|
||||
def rewrite_transcript(self, session_id: str, messages: list[dict[str, Any]]) -> None:
|
||||
"""Replace the entire transcript for a session with new messages.
|
||||
|
||||
|
||||
Used by /retry, /undo, and /compress to persist modified conversation history.
|
||||
Rewrites both SQLite and legacy JSONL storage.
|
||||
"""
|
||||
@@ -705,14 +686,14 @@ class SessionStore:
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug("Failed to rewrite transcript in DB: %s", e)
|
||||
|
||||
|
||||
# JSONL: overwrite the file
|
||||
transcript_path = self.get_transcript_path(session_id)
|
||||
with open(transcript_path, "w", encoding="utf-8") as f:
|
||||
for msg in messages:
|
||||
f.write(json.dumps(msg, ensure_ascii=False) + "\n")
|
||||
|
||||
def load_transcript(self, session_id: str) -> List[Dict[str, Any]]:
|
||||
def load_transcript(self, session_id: str) -> list[dict[str, Any]]:
|
||||
"""Load all messages from a session's transcript."""
|
||||
# Try SQLite first
|
||||
if self._db:
|
||||
@@ -722,51 +703,49 @@ class SessionStore:
|
||||
return messages
|
||||
except Exception as e:
|
||||
logger.debug("Could not load messages from DB: %s", e)
|
||||
|
||||
|
||||
# Fall back to legacy JSONL
|
||||
transcript_path = self.get_transcript_path(session_id)
|
||||
|
||||
|
||||
if not transcript_path.exists():
|
||||
return []
|
||||
|
||||
|
||||
messages = []
|
||||
with open(transcript_path, "r", encoding="utf-8") as f:
|
||||
with open(transcript_path, encoding="utf-8") as f:
|
||||
for line in f:
|
||||
line = line.strip()
|
||||
if line:
|
||||
messages.append(json.loads(line))
|
||||
|
||||
|
||||
return messages
|
||||
|
||||
|
||||
def build_session_context(
|
||||
source: SessionSource,
|
||||
config: GatewayConfig,
|
||||
session_entry: Optional[SessionEntry] = None
|
||||
source: SessionSource, config: GatewayConfig, session_entry: SessionEntry | None = None
|
||||
) -> SessionContext:
|
||||
"""
|
||||
Build a full session context from a source and config.
|
||||
|
||||
|
||||
This is used to inject context into the agent's system prompt.
|
||||
"""
|
||||
connected = config.get_connected_platforms()
|
||||
|
||||
|
||||
home_channels = {}
|
||||
for platform in connected:
|
||||
home = config.get_home_channel(platform)
|
||||
if home:
|
||||
home_channels[platform] = home
|
||||
|
||||
|
||||
context = SessionContext(
|
||||
source=source,
|
||||
connected_platforms=connected,
|
||||
home_channels=home_channels,
|
||||
)
|
||||
|
||||
|
||||
if session_entry:
|
||||
context.session_key = session_entry.session_key
|
||||
context.session_id = session_entry.session_id
|
||||
context.created_at = session_entry.created_at
|
||||
context.updated_at = session_entry.updated_at
|
||||
|
||||
|
||||
return context
|
||||
|
||||
@@ -13,7 +13,6 @@ concurrently under distinct configurations).
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
|
||||
def _get_pid_path() -> Path:
|
||||
@@ -37,7 +36,7 @@ def remove_pid_file() -> None:
|
||||
pass
|
||||
|
||||
|
||||
def get_running_pid() -> Optional[int]:
|
||||
def get_running_pid() -> int | None:
|
||||
"""Return the PID of a running gateway instance, or ``None``.
|
||||
|
||||
Checks the PID file and verifies the process is actually alive.
|
||||
|
||||
@@ -12,8 +12,6 @@ import json
|
||||
import os
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
|
||||
CACHE_PATH = Path(os.path.expanduser("~/.hermes/sticker_cache.json"))
|
||||
|
||||
@@ -43,7 +41,7 @@ def _save_cache(cache: dict) -> None:
|
||||
)
|
||||
|
||||
|
||||
def get_cached_description(file_unique_id: str) -> Optional[dict]:
|
||||
def get_cached_description(file_unique_id: str) -> dict | None:
|
||||
"""
|
||||
Look up a cached sticker description.
|
||||
|
||||
@@ -92,11 +90,11 @@ def build_sticker_injection(
|
||||
"""
|
||||
context = ""
|
||||
if set_name and emoji:
|
||||
context = f" {emoji} from \"{set_name}\""
|
||||
context = f' {emoji} from "{set_name}"'
|
||||
elif emoji:
|
||||
context = f" {emoji}"
|
||||
|
||||
return f"[The user sent a sticker{context}~ It shows: \"{description}\" (=^.w.^=)]"
|
||||
return f'[The user sent a sticker{context}~ It shows: "{description}" (=^.w.^=)]'
|
||||
|
||||
|
||||
def build_animated_sticker_injection(emoji: str = "") -> str:
|
||||
|
||||
@@ -5,7 +5,7 @@ Provides subcommands for:
|
||||
- hermes chat - Interactive chat (same as ./hermes)
|
||||
- hermes gateway - Run gateway in foreground
|
||||
- hermes gateway start - Start gateway service
|
||||
- hermes gateway stop - Stop gateway service
|
||||
- hermes gateway stop - Stop gateway service
|
||||
- hermes setup - Interactive setup wizard
|
||||
- hermes status - Show status of all components
|
||||
- hermes cron - Manage cron jobs
|
||||
|
||||
@@ -15,27 +15,25 @@ Architecture:
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
import stat
|
||||
import base64
|
||||
import hashlib
|
||||
import subprocess
|
||||
import time
|
||||
import uuid
|
||||
import webbrowser
|
||||
from contextlib import contextmanager
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from datetime import UTC, datetime
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
from typing import Any
|
||||
|
||||
import httpx
|
||||
import yaml
|
||||
|
||||
from hermes_cli.config import get_hermes_home, get_config_path
|
||||
from hermes_cli.config import get_config_path, get_hermes_home
|
||||
from hermes_constants import OPENROUTER_BASE_URL
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -58,8 +56,8 @@ DEFAULT_NOUS_INFERENCE_URL = "https://inference-api.nousresearch.com/v1"
|
||||
DEFAULT_NOUS_CLIENT_ID = "hermes-cli"
|
||||
DEFAULT_NOUS_SCOPE = "inference:mint_agent_key"
|
||||
DEFAULT_AGENT_KEY_MIN_TTL_SECONDS = 30 * 60 # 30 minutes
|
||||
ACCESS_TOKEN_REFRESH_SKEW_SECONDS = 120 # refresh 2 min before expiry
|
||||
DEVICE_AUTH_POLL_INTERVAL_CAP_SECONDS = 1 # poll at most every 1s
|
||||
ACCESS_TOKEN_REFRESH_SKEW_SECONDS = 120 # refresh 2 min before expiry
|
||||
DEVICE_AUTH_POLL_INTERVAL_CAP_SECONDS = 1 # poll at most every 1s
|
||||
DEFAULT_CODEX_BASE_URL = "https://chatgpt.com/backend-api/codex"
|
||||
CODEX_OAUTH_CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann"
|
||||
CODEX_OAUTH_TOKEN_URL = "https://auth.openai.com/oauth/token"
|
||||
@@ -70,9 +68,11 @@ CODEX_ACCESS_TOKEN_REFRESH_SKEW_SECONDS = 120
|
||||
# Provider Registry
|
||||
# =============================================================================
|
||||
|
||||
|
||||
@dataclass
|
||||
class ProviderConfig:
|
||||
"""Describes a known inference provider."""
|
||||
|
||||
id: str
|
||||
name: str
|
||||
auth_type: str # "oauth_device_code", "oauth_external", or "api_key"
|
||||
@@ -80,14 +80,14 @@ class ProviderConfig:
|
||||
inference_base_url: str = ""
|
||||
client_id: str = ""
|
||||
scope: str = ""
|
||||
extra: Dict[str, Any] = field(default_factory=dict)
|
||||
extra: dict[str, Any] = field(default_factory=dict)
|
||||
# For API-key providers: env vars to check (in priority order)
|
||||
api_key_env_vars: tuple = ()
|
||||
# Optional env var for base URL override
|
||||
base_url_env_var: str = ""
|
||||
|
||||
|
||||
PROVIDER_REGISTRY: Dict[str, ProviderConfig] = {
|
||||
PROVIDER_REGISTRY: dict[str, ProviderConfig] = {
|
||||
"nous": ProviderConfig(
|
||||
id="nous",
|
||||
name="Nous Portal",
|
||||
@@ -172,14 +172,14 @@ def _resolve_kimi_base_url(api_key: str, default_url: str, env_override: str) ->
|
||||
|
||||
ZAI_ENDPOINTS = [
|
||||
# (id, base_url, default_model, label)
|
||||
("global", "https://api.z.ai/api/paas/v4", "glm-5", "Global"),
|
||||
("cn", "https://open.bigmodel.cn/api/paas/v4", "glm-5", "China"),
|
||||
("coding-global", "https://api.z.ai/api/coding/paas/v4", "glm-4.7", "Global (Coding Plan)"),
|
||||
("coding-cn", "https://open.bigmodel.cn/api/coding/paas/v4", "glm-4.7", "China (Coding Plan)"),
|
||||
("global", "https://api.z.ai/api/paas/v4", "glm-5", "Global"),
|
||||
("cn", "https://open.bigmodel.cn/api/paas/v4", "glm-5", "China"),
|
||||
("coding-global", "https://api.z.ai/api/coding/paas/v4", "glm-4.7", "Global (Coding Plan)"),
|
||||
("coding-cn", "https://open.bigmodel.cn/api/coding/paas/v4", "glm-4.7", "China (Coding Plan)"),
|
||||
]
|
||||
|
||||
|
||||
def detect_zai_endpoint(api_key: str, timeout: float = 8.0) -> Optional[Dict[str, str]]:
|
||||
def detect_zai_endpoint(api_key: str, timeout: float = 8.0) -> dict[str, str] | None:
|
||||
"""Probe z.ai endpoints to find one that accepts this API key.
|
||||
|
||||
Returns {"id": ..., "base_url": ..., "model": ..., "label": ...} for the
|
||||
@@ -219,6 +219,7 @@ def detect_zai_endpoint(api_key: str, timeout: float = 8.0) -> Optional[Dict[str
|
||||
# Error Types
|
||||
# =============================================================================
|
||||
|
||||
|
||||
class AuthError(RuntimeError):
|
||||
"""Structured auth error with UX mapping hints."""
|
||||
|
||||
@@ -227,7 +228,7 @@ class AuthError(RuntimeError):
|
||||
message: str,
|
||||
*,
|
||||
provider: str = "",
|
||||
code: Optional[str] = None,
|
||||
code: str | None = None,
|
||||
relogin_required: bool = False,
|
||||
) -> None:
|
||||
super().__init__(message)
|
||||
@@ -245,16 +246,10 @@ def format_auth_error(error: Exception) -> str:
|
||||
return f"{error} Run `hermes model` to re-authenticate."
|
||||
|
||||
if error.code == "subscription_required":
|
||||
return (
|
||||
"No active paid subscription found on Nous Portal. "
|
||||
"Please purchase/activate a subscription, then retry."
|
||||
)
|
||||
return "No active paid subscription found on Nous Portal. Please purchase/activate a subscription, then retry."
|
||||
|
||||
if error.code == "insufficient_credits":
|
||||
return (
|
||||
"Subscription credits are exhausted. "
|
||||
"Top up/renew credits in Nous Portal, then retry."
|
||||
)
|
||||
return "Subscription credits are exhausted. Top up/renew credits in Nous Portal, then retry."
|
||||
|
||||
if error.code == "temporarily_unavailable":
|
||||
return f"{error} Please retry in a few seconds."
|
||||
@@ -262,7 +257,7 @@ def format_auth_error(error: Exception) -> str:
|
||||
return str(error)
|
||||
|
||||
|
||||
def _token_fingerprint(token: Any) -> Optional[str]:
|
||||
def _token_fingerprint(token: Any) -> str | None:
|
||||
"""Return a short hash fingerprint for telemetry without leaking token bytes."""
|
||||
if not isinstance(token, str):
|
||||
return None
|
||||
@@ -277,10 +272,10 @@ def _oauth_trace_enabled() -> bool:
|
||||
return raw in {"1", "true", "yes", "on"}
|
||||
|
||||
|
||||
def _oauth_trace(event: str, *, sequence_id: Optional[str] = None, **fields: Any) -> None:
|
||||
def _oauth_trace(event: str, *, sequence_id: str | None = None, **fields: Any) -> None:
|
||||
if not _oauth_trace_enabled():
|
||||
return
|
||||
payload: Dict[str, Any] = {"event": event}
|
||||
payload: dict[str, Any] = {"event": event}
|
||||
if sequence_id:
|
||||
payload["sequence_id"] = sequence_id
|
||||
payload.update(fields)
|
||||
@@ -291,6 +286,7 @@ def _oauth_trace(event: str, *, sequence_id: Optional[str] = None, **fields: Any
|
||||
# Auth Store — persistence layer for ~/.hermes/auth.json
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def _auth_file_path() -> Path:
|
||||
return get_hermes_home() / "auth.json"
|
||||
|
||||
@@ -326,7 +322,7 @@ def _auth_store_lock(timeout_seconds: float = AUTH_LOCK_TIMEOUT_SECONDS):
|
||||
fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)
|
||||
|
||||
|
||||
def _load_auth_store(auth_file: Optional[Path] = None) -> Dict[str, Any]:
|
||||
def _load_auth_store(auth_file: Path | None = None) -> dict[str, Any]:
|
||||
auth_file = auth_file or _auth_file_path()
|
||||
if not auth_file.exists():
|
||||
return {"version": AUTH_STORE_VERSION, "providers": {}}
|
||||
@@ -345,17 +341,16 @@ def _load_auth_store(auth_file: Optional[Path] = None) -> Dict[str, Any]:
|
||||
providers = {}
|
||||
if "nous_portal" in systems:
|
||||
providers["nous"] = systems["nous_portal"]
|
||||
return {"version": AUTH_STORE_VERSION, "providers": providers,
|
||||
"active_provider": "nous" if providers else None}
|
||||
return {"version": AUTH_STORE_VERSION, "providers": providers, "active_provider": "nous" if providers else None}
|
||||
|
||||
return {"version": AUTH_STORE_VERSION, "providers": {}}
|
||||
|
||||
|
||||
def _save_auth_store(auth_store: Dict[str, Any]) -> Path:
|
||||
def _save_auth_store(auth_store: dict[str, Any]) -> Path:
|
||||
auth_file = _auth_file_path()
|
||||
auth_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
auth_store["version"] = AUTH_STORE_VERSION
|
||||
auth_store["updated_at"] = datetime.now(timezone.utc).isoformat()
|
||||
auth_store["updated_at"] = datetime.now(UTC).isoformat()
|
||||
payload = json.dumps(auth_store, indent=2) + "\n"
|
||||
tmp_path = auth_file.with_name(f"{auth_file.name}.tmp.{os.getpid()}.{uuid.uuid4().hex}")
|
||||
try:
|
||||
@@ -387,7 +382,7 @@ def _save_auth_store(auth_store: Dict[str, Any]) -> Path:
|
||||
return auth_file
|
||||
|
||||
|
||||
def _load_provider_state(auth_store: Dict[str, Any], provider_id: str) -> Optional[Dict[str, Any]]:
|
||||
def _load_provider_state(auth_store: dict[str, Any], provider_id: str) -> dict[str, Any] | None:
|
||||
providers = auth_store.get("providers")
|
||||
if not isinstance(providers, dict):
|
||||
return None
|
||||
@@ -395,7 +390,7 @@ def _load_provider_state(auth_store: Dict[str, Any], provider_id: str) -> Option
|
||||
return dict(state) if isinstance(state, dict) else None
|
||||
|
||||
|
||||
def _save_provider_state(auth_store: Dict[str, Any], provider_id: str, state: Dict[str, Any]) -> None:
|
||||
def _save_provider_state(auth_store: dict[str, Any], provider_id: str, state: dict[str, Any]) -> None:
|
||||
providers = auth_store.setdefault("providers", {})
|
||||
if not isinstance(providers, dict):
|
||||
auth_store["providers"] = {}
|
||||
@@ -404,19 +399,19 @@ def _save_provider_state(auth_store: Dict[str, Any], provider_id: str, state: Di
|
||||
auth_store["active_provider"] = provider_id
|
||||
|
||||
|
||||
def get_provider_auth_state(provider_id: str) -> Optional[Dict[str, Any]]:
|
||||
def get_provider_auth_state(provider_id: str) -> dict[str, Any] | None:
|
||||
"""Return persisted auth state for a provider, or None."""
|
||||
auth_store = _load_auth_store()
|
||||
return _load_provider_state(auth_store, provider_id)
|
||||
|
||||
|
||||
def get_active_provider() -> Optional[str]:
|
||||
def get_active_provider() -> str | None:
|
||||
"""Return the currently active provider ID from auth store."""
|
||||
auth_store = _load_auth_store()
|
||||
return auth_store.get("active_provider")
|
||||
|
||||
|
||||
def clear_provider_auth(provider_id: Optional[str] = None) -> bool:
|
||||
def clear_provider_auth(provider_id: str | None = None) -> bool:
|
||||
"""
|
||||
Clear auth state for a provider. Used by `hermes logout`.
|
||||
If provider_id is None, clears the active provider.
|
||||
@@ -455,11 +450,12 @@ def deactivate_provider() -> None:
|
||||
# Provider Resolution — picks which provider to use
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def resolve_provider(
|
||||
requested: Optional[str] = None,
|
||||
requested: str | None = None,
|
||||
*,
|
||||
explicit_api_key: Optional[str] = None,
|
||||
explicit_base_url: Optional[str] = None,
|
||||
explicit_api_key: str | None = None,
|
||||
explicit_base_url: str | None = None,
|
||||
) -> str:
|
||||
"""
|
||||
Determine which inference provider to use.
|
||||
@@ -475,9 +471,14 @@ def resolve_provider(
|
||||
|
||||
# Normalize provider aliases
|
||||
_PROVIDER_ALIASES = {
|
||||
"glm": "zai", "z-ai": "zai", "z.ai": "zai", "zhipu": "zai",
|
||||
"kimi": "kimi-coding", "moonshot": "kimi-coding",
|
||||
"minimax-china": "minimax-cn", "minimax_cn": "minimax-cn",
|
||||
"glm": "zai",
|
||||
"z-ai": "zai",
|
||||
"z.ai": "zai",
|
||||
"zhipu": "zai",
|
||||
"kimi": "kimi-coding",
|
||||
"moonshot": "kimi-coding",
|
||||
"minimax-china": "minimax-cn",
|
||||
"minimax_cn": "minimax-cn",
|
||||
}
|
||||
normalized = _PROVIDER_ALIASES.get(normalized, normalized)
|
||||
|
||||
@@ -524,7 +525,8 @@ def resolve_provider(
|
||||
# Timestamp / TTL helpers
|
||||
# =============================================================================
|
||||
|
||||
def _parse_iso_timestamp(value: Any) -> Optional[float]:
|
||||
|
||||
def _parse_iso_timestamp(value: Any) -> float | None:
|
||||
if not isinstance(value, str) or not value:
|
||||
return None
|
||||
text = value.strip()
|
||||
@@ -537,7 +539,7 @@ def _parse_iso_timestamp(value: Any) -> Optional[float]:
|
||||
except Exception:
|
||||
return None
|
||||
if parsed.tzinfo is None:
|
||||
parsed = parsed.replace(tzinfo=timezone.utc)
|
||||
parsed = parsed.replace(tzinfo=UTC)
|
||||
return parsed.timestamp()
|
||||
|
||||
|
||||
@@ -556,14 +558,14 @@ def _coerce_ttl_seconds(expires_in: Any) -> int:
|
||||
return max(0, ttl)
|
||||
|
||||
|
||||
def _optional_base_url(value: Any) -> Optional[str]:
|
||||
def _optional_base_url(value: Any) -> str | None:
|
||||
if not isinstance(value, str):
|
||||
return None
|
||||
cleaned = value.strip().rstrip("/")
|
||||
return cleaned if cleaned else None
|
||||
|
||||
|
||||
def _decode_jwt_claims(token: Any) -> Dict[str, Any]:
|
||||
def _decode_jwt_claims(token: Any) -> dict[str, Any]:
|
||||
if not isinstance(token, str) or token.count(".") != 2:
|
||||
return {}
|
||||
payload = token.split(".")[1]
|
||||
@@ -588,6 +590,7 @@ def _codex_access_token_is_expiring(access_token: Any, skew_seconds: int) -> boo
|
||||
# SSH / remote session detection
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def _is_remote_session() -> bool:
|
||||
"""Detect if running in an SSH session where webbrowser.open() won't work."""
|
||||
return bool(os.getenv("SSH_CLIENT") or os.getenv("SSH_TTY"))
|
||||
@@ -601,9 +604,10 @@ def _is_remote_session() -> bool:
|
||||
# where one app's refresh invalidates the other's session.
|
||||
# =============================================================================
|
||||
|
||||
def _read_codex_tokens(*, _lock: bool = True) -> Dict[str, Any]:
|
||||
|
||||
def _read_codex_tokens(*, _lock: bool = True) -> dict[str, Any]:
|
||||
"""Read Codex OAuth tokens from Hermes auth store (~/.hermes/auth.json).
|
||||
|
||||
|
||||
Returns dict with 'tokens' (access_token, refresh_token) and 'last_refresh'.
|
||||
Raises AuthError if no Codex tokens are stored.
|
||||
"""
|
||||
@@ -650,10 +654,10 @@ def _read_codex_tokens(*, _lock: bool = True) -> Dict[str, Any]:
|
||||
}
|
||||
|
||||
|
||||
def _save_codex_tokens(tokens: Dict[str, str], last_refresh: str = None) -> None:
|
||||
def _save_codex_tokens(tokens: dict[str, str], last_refresh: str = None) -> None:
|
||||
"""Save Codex OAuth tokens to Hermes auth store (~/.hermes/auth.json)."""
|
||||
if last_refresh is None:
|
||||
last_refresh = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
|
||||
last_refresh = datetime.now(UTC).isoformat().replace("+00:00", "Z")
|
||||
with _auth_store_lock():
|
||||
auth_store = _load_auth_store()
|
||||
state = _load_provider_state(auth_store, "openai-codex") or {}
|
||||
@@ -665,11 +669,11 @@ def _save_codex_tokens(tokens: Dict[str, str], last_refresh: str = None) -> None
|
||||
|
||||
|
||||
def _refresh_codex_auth_tokens(
|
||||
tokens: Dict[str, str],
|
||||
tokens: dict[str, str],
|
||||
timeout_seconds: float,
|
||||
) -> Dict[str, str]:
|
||||
) -> dict[str, str]:
|
||||
"""Refresh Codex access token using the refresh token.
|
||||
|
||||
|
||||
Saves the new tokens to Hermes auth store automatically.
|
||||
"""
|
||||
refresh_token = tokens.get("refresh_token")
|
||||
@@ -746,9 +750,9 @@ def _refresh_codex_auth_tokens(
|
||||
return updated_tokens
|
||||
|
||||
|
||||
def _import_codex_cli_tokens() -> Optional[Dict[str, str]]:
|
||||
def _import_codex_cli_tokens() -> dict[str, str] | None:
|
||||
"""Try to read tokens from ~/.codex/auth.json (Codex CLI shared file).
|
||||
|
||||
|
||||
Returns tokens dict if valid, None otherwise. Does NOT write to the shared file.
|
||||
"""
|
||||
codex_home = os.getenv("CODEX_HOME", "").strip()
|
||||
@@ -774,7 +778,7 @@ def resolve_codex_runtime_credentials(
|
||||
force_refresh: bool = False,
|
||||
refresh_if_expiring: bool = True,
|
||||
refresh_skew_seconds: int = CODEX_ACCESS_TOKEN_REFRESH_SKEW_SECONDS,
|
||||
) -> Dict[str, Any]:
|
||||
) -> dict[str, Any]:
|
||||
"""Resolve runtime credentials from Hermes's own Codex token store."""
|
||||
try:
|
||||
data = _read_codex_tokens()
|
||||
@@ -817,10 +821,7 @@ def resolve_codex_runtime_credentials(
|
||||
tokens = _refresh_codex_auth_tokens(tokens, refresh_timeout_seconds)
|
||||
access_token = str(tokens.get("access_token", "") or "").strip()
|
||||
|
||||
base_url = (
|
||||
os.getenv("HERMES_CODEX_BASE_URL", "").strip().rstrip("/")
|
||||
or DEFAULT_CODEX_BASE_URL
|
||||
)
|
||||
base_url = os.getenv("HERMES_CODEX_BASE_URL", "").strip().rstrip("/") or DEFAULT_CODEX_BASE_URL
|
||||
|
||||
return {
|
||||
"provider": "openai-codex",
|
||||
@@ -836,24 +837,19 @@ def resolve_codex_runtime_credentials(
|
||||
# TLS verification helper
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def _resolve_verify(
|
||||
*,
|
||||
insecure: Optional[bool] = None,
|
||||
ca_bundle: Optional[str] = None,
|
||||
auth_state: Optional[Dict[str, Any]] = None,
|
||||
insecure: bool | None = None,
|
||||
ca_bundle: str | None = None,
|
||||
auth_state: dict[str, Any] | None = None,
|
||||
) -> bool | str:
|
||||
tls_state = auth_state.get("tls") if isinstance(auth_state, dict) else {}
|
||||
tls_state = tls_state if isinstance(tls_state, dict) else {}
|
||||
|
||||
effective_insecure = (
|
||||
bool(insecure) if insecure is not None
|
||||
else bool(tls_state.get("insecure", False))
|
||||
)
|
||||
effective_insecure = bool(insecure) if insecure is not None else bool(tls_state.get("insecure", False))
|
||||
effective_ca = (
|
||||
ca_bundle
|
||||
or tls_state.get("ca_bundle")
|
||||
or os.getenv("HERMES_CA_BUNDLE")
|
||||
or os.getenv("SSL_CERT_FILE")
|
||||
ca_bundle or tls_state.get("ca_bundle") or os.getenv("HERMES_CA_BUNDLE") or os.getenv("SSL_CERT_FILE")
|
||||
)
|
||||
|
||||
if effective_insecure:
|
||||
@@ -867,12 +863,13 @@ def _resolve_verify(
|
||||
# OAuth Device Code Flow — generic, parameterized by provider
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def _request_device_code(
|
||||
client: httpx.Client,
|
||||
portal_base_url: str,
|
||||
client_id: str,
|
||||
scope: Optional[str],
|
||||
) -> Dict[str, Any]:
|
||||
scope: str | None,
|
||||
) -> dict[str, Any]:
|
||||
"""POST to the device code endpoint. Returns device_code, user_code, etc."""
|
||||
response = client.post(
|
||||
f"{portal_base_url}/api/oauth/device/code",
|
||||
@@ -885,8 +882,12 @@ def _request_device_code(
|
||||
data = response.json()
|
||||
|
||||
required_fields = [
|
||||
"device_code", "user_code", "verification_uri",
|
||||
"verification_uri_complete", "expires_in", "interval",
|
||||
"device_code",
|
||||
"user_code",
|
||||
"verification_uri",
|
||||
"verification_uri_complete",
|
||||
"expires_in",
|
||||
"interval",
|
||||
]
|
||||
missing = [f for f in required_fields if f not in data]
|
||||
if missing:
|
||||
@@ -901,7 +902,7 @@ def _poll_for_token(
|
||||
device_code: str,
|
||||
expires_in: int,
|
||||
poll_interval: int,
|
||||
) -> Dict[str, Any]:
|
||||
) -> dict[str, Any]:
|
||||
"""Poll the token endpoint until the user approves or the code expires."""
|
||||
deadline = time.time() + max(1, expires_in)
|
||||
current_interval = max(1, min(poll_interval, DEVICE_AUTH_POLL_INTERVAL_CAP_SECONDS))
|
||||
@@ -947,13 +948,14 @@ def _poll_for_token(
|
||||
# Nous Portal — token refresh, agent key minting, model discovery
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def _refresh_access_token(
|
||||
*,
|
||||
client: httpx.Client,
|
||||
portal_base_url: str,
|
||||
client_id: str,
|
||||
refresh_token: str,
|
||||
) -> Dict[str, Any]:
|
||||
) -> dict[str, Any]:
|
||||
response = client.post(
|
||||
f"{portal_base_url}/api/oauth/token",
|
||||
data={
|
||||
@@ -966,15 +968,15 @@ def _refresh_access_token(
|
||||
if response.status_code == 200:
|
||||
payload = response.json()
|
||||
if "access_token" not in payload:
|
||||
raise AuthError("Refresh response missing access_token",
|
||||
provider="nous", code="invalid_token", relogin_required=True)
|
||||
raise AuthError(
|
||||
"Refresh response missing access_token", provider="nous", code="invalid_token", relogin_required=True
|
||||
)
|
||||
return payload
|
||||
|
||||
try:
|
||||
error_payload = response.json()
|
||||
except Exception as exc:
|
||||
raise AuthError("Refresh token exchange failed",
|
||||
provider="nous", relogin_required=True) from exc
|
||||
raise AuthError("Refresh token exchange failed", provider="nous", relogin_required=True) from exc
|
||||
|
||||
code = str(error_payload.get("error", "invalid_grant"))
|
||||
description = str(error_payload.get("error_description") or "Refresh token exchange failed")
|
||||
@@ -988,7 +990,7 @@ def _mint_agent_key(
|
||||
portal_base_url: str,
|
||||
access_token: str,
|
||||
min_ttl_seconds: int,
|
||||
) -> Dict[str, Any]:
|
||||
) -> dict[str, Any]:
|
||||
"""Mint (or reuse) a short-lived inference API key."""
|
||||
response = client.post(
|
||||
f"{portal_base_url}/api/oauth/agent-key",
|
||||
@@ -999,15 +1001,13 @@ def _mint_agent_key(
|
||||
if response.status_code == 200:
|
||||
payload = response.json()
|
||||
if "api_key" not in payload:
|
||||
raise AuthError("Mint response missing api_key",
|
||||
provider="nous", code="server_error")
|
||||
raise AuthError("Mint response missing api_key", provider="nous", code="server_error")
|
||||
return payload
|
||||
|
||||
try:
|
||||
error_payload = response.json()
|
||||
except Exception as exc:
|
||||
raise AuthError("Agent key mint request failed",
|
||||
provider="nous", code="server_error") from exc
|
||||
raise AuthError("Agent key mint request failed", provider="nous", code="server_error") from exc
|
||||
|
||||
code = str(error_payload.get("error", "server_error"))
|
||||
description = str(error_payload.get("error_description") or "Agent key mint request failed")
|
||||
@@ -1021,7 +1021,7 @@ def fetch_nous_models(
|
||||
api_key: str,
|
||||
timeout_seconds: float = 15.0,
|
||||
verify: bool | str = True,
|
||||
) -> List[str]:
|
||||
) -> list[str]:
|
||||
"""Fetch available model IDs from the Nous inference API."""
|
||||
timeout = httpx.Timeout(timeout_seconds)
|
||||
with httpx.Client(timeout=timeout, headers={"Accept": "application/json"}, verify=verify) as client:
|
||||
@@ -1044,7 +1044,7 @@ def fetch_nous_models(
|
||||
if not isinstance(data, list):
|
||||
return []
|
||||
|
||||
model_ids: List[str] = []
|
||||
model_ids: list[str] = []
|
||||
for item in data:
|
||||
if not isinstance(item, dict):
|
||||
continue
|
||||
@@ -1059,7 +1059,7 @@ def fetch_nous_models(
|
||||
return list(dict.fromkeys(model_ids))
|
||||
|
||||
|
||||
def _agent_key_is_usable(state: Dict[str, Any], min_ttl_seconds: int) -> bool:
|
||||
def _agent_key_is_usable(state: dict[str, Any], min_ttl_seconds: int) -> bool:
|
||||
key = state.get("agent_key")
|
||||
if not isinstance(key, str) or not key.strip():
|
||||
return False
|
||||
@@ -1070,10 +1070,10 @@ def resolve_nous_runtime_credentials(
|
||||
*,
|
||||
min_key_ttl_seconds: int = DEFAULT_AGENT_KEY_MIN_TTL_SECONDS,
|
||||
timeout_seconds: float = 15.0,
|
||||
insecure: Optional[bool] = None,
|
||||
ca_bundle: Optional[str] = None,
|
||||
insecure: bool | None = None,
|
||||
ca_bundle: str | None = None,
|
||||
force_mint: bool = False,
|
||||
) -> Dict[str, Any]:
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Resolve Nous inference credentials for runtime use.
|
||||
|
||||
@@ -1092,8 +1092,7 @@ def resolve_nous_runtime_credentials(
|
||||
state = _load_provider_state(auth_store, "nous")
|
||||
|
||||
if not state:
|
||||
raise AuthError("Hermes is not logged into Nous Portal.",
|
||||
provider="nous", relogin_required=True)
|
||||
raise AuthError("Hermes is not logged into Nous Portal.", provider="nous", relogin_required=True)
|
||||
|
||||
portal_base_url = (
|
||||
_optional_base_url(state.get("portal_base_url"))
|
||||
@@ -1143,14 +1142,14 @@ def resolve_nous_runtime_credentials(
|
||||
refresh_token = state.get("refresh_token")
|
||||
|
||||
if not isinstance(access_token, str) or not access_token:
|
||||
raise AuthError("No access token found for Nous Portal login.",
|
||||
provider="nous", relogin_required=True)
|
||||
raise AuthError("No access token found for Nous Portal login.", provider="nous", relogin_required=True)
|
||||
|
||||
# Step 1: refresh access token if expiring
|
||||
if _is_expiring(state.get("expires_at"), ACCESS_TOKEN_REFRESH_SKEW_SECONDS):
|
||||
if not isinstance(refresh_token, str) or not refresh_token:
|
||||
raise AuthError("Session expired and no refresh token is available.",
|
||||
provider="nous", relogin_required=True)
|
||||
raise AuthError(
|
||||
"Session expired and no refresh token is available.", provider="nous", relogin_required=True
|
||||
)
|
||||
|
||||
_oauth_trace(
|
||||
"refresh_start",
|
||||
@@ -1159,10 +1158,12 @@ def resolve_nous_runtime_credentials(
|
||||
refresh_token_fp=_token_fingerprint(refresh_token),
|
||||
)
|
||||
refreshed = _refresh_access_token(
|
||||
client=client, portal_base_url=portal_base_url,
|
||||
client_id=client_id, refresh_token=refresh_token,
|
||||
client=client,
|
||||
portal_base_url=portal_base_url,
|
||||
client_id=client_id,
|
||||
refresh_token=refresh_token,
|
||||
)
|
||||
now = datetime.now(timezone.utc)
|
||||
now = datetime.now(UTC)
|
||||
access_ttl = _coerce_ttl_seconds(refreshed.get("expires_in"))
|
||||
previous_refresh_token = refresh_token
|
||||
state["access_token"] = refreshed["access_token"]
|
||||
@@ -1174,9 +1175,7 @@ def resolve_nous_runtime_credentials(
|
||||
inference_base_url = refreshed_url
|
||||
state["obtained_at"] = now.isoformat()
|
||||
state["expires_in"] = access_ttl
|
||||
state["expires_at"] = datetime.fromtimestamp(
|
||||
now.timestamp() + access_ttl, tz=timezone.utc
|
||||
).isoformat()
|
||||
state["expires_at"] = datetime.fromtimestamp(now.timestamp() + access_ttl, tz=UTC).isoformat()
|
||||
access_token = state["access_token"]
|
||||
refresh_token = state["refresh_token"]
|
||||
_oauth_trace(
|
||||
@@ -1191,7 +1190,7 @@ def resolve_nous_runtime_credentials(
|
||||
|
||||
# Step 2: mint agent key if missing/expiring
|
||||
used_cached_key = False
|
||||
mint_payload: Optional[Dict[str, Any]] = None
|
||||
mint_payload: dict[str, Any] | None = None
|
||||
|
||||
if not force_mint and _agent_key_is_usable(state, min_key_ttl_seconds):
|
||||
used_cached_key = True
|
||||
@@ -1204,8 +1203,10 @@ def resolve_nous_runtime_credentials(
|
||||
access_token_fp=_token_fingerprint(access_token),
|
||||
)
|
||||
mint_payload = _mint_agent_key(
|
||||
client=client, portal_base_url=portal_base_url,
|
||||
access_token=access_token, min_ttl_seconds=min_key_ttl_seconds,
|
||||
client=client,
|
||||
portal_base_url=portal_base_url,
|
||||
access_token=access_token,
|
||||
min_ttl_seconds=min_key_ttl_seconds,
|
||||
)
|
||||
except AuthError as exc:
|
||||
_oauth_trace(
|
||||
@@ -1227,10 +1228,12 @@ def resolve_nous_runtime_credentials(
|
||||
refresh_token_fp=_token_fingerprint(latest_refresh_token),
|
||||
)
|
||||
refreshed = _refresh_access_token(
|
||||
client=client, portal_base_url=portal_base_url,
|
||||
client_id=client_id, refresh_token=latest_refresh_token,
|
||||
client=client,
|
||||
portal_base_url=portal_base_url,
|
||||
client_id=client_id,
|
||||
refresh_token=latest_refresh_token,
|
||||
)
|
||||
now = datetime.now(timezone.utc)
|
||||
now = datetime.now(UTC)
|
||||
access_ttl = _coerce_ttl_seconds(refreshed.get("expires_in"))
|
||||
state["access_token"] = refreshed["access_token"]
|
||||
state["refresh_token"] = refreshed.get("refresh_token") or latest_refresh_token
|
||||
@@ -1241,9 +1244,7 @@ def resolve_nous_runtime_credentials(
|
||||
inference_base_url = refreshed_url
|
||||
state["obtained_at"] = now.isoformat()
|
||||
state["expires_in"] = access_ttl
|
||||
state["expires_at"] = datetime.fromtimestamp(
|
||||
now.timestamp() + access_ttl, tz=timezone.utc
|
||||
).isoformat()
|
||||
state["expires_at"] = datetime.fromtimestamp(now.timestamp() + access_ttl, tz=UTC).isoformat()
|
||||
access_token = state["access_token"]
|
||||
refresh_token = state["refresh_token"]
|
||||
_oauth_trace(
|
||||
@@ -1257,14 +1258,16 @@ def resolve_nous_runtime_credentials(
|
||||
_persist_state("post_refresh_mint_retry")
|
||||
|
||||
mint_payload = _mint_agent_key(
|
||||
client=client, portal_base_url=portal_base_url,
|
||||
access_token=access_token, min_ttl_seconds=min_key_ttl_seconds,
|
||||
client=client,
|
||||
portal_base_url=portal_base_url,
|
||||
access_token=access_token,
|
||||
min_ttl_seconds=min_key_ttl_seconds,
|
||||
)
|
||||
else:
|
||||
raise
|
||||
|
||||
if mint_payload is not None:
|
||||
now = datetime.now(timezone.utc)
|
||||
now = datetime.now(UTC)
|
||||
state["agent_key"] = mint_payload.get("api_key")
|
||||
state["agent_key_id"] = mint_payload.get("key_id")
|
||||
state["agent_key_expires_at"] = mint_payload.get("expires_at")
|
||||
@@ -1293,8 +1296,7 @@ def resolve_nous_runtime_credentials(
|
||||
|
||||
api_key = state.get("agent_key")
|
||||
if not isinstance(api_key, str) or not api_key:
|
||||
raise AuthError("Failed to resolve a Nous inference API key",
|
||||
provider="nous", code="server_error")
|
||||
raise AuthError("Failed to resolve a Nous inference API key", provider="nous", code="server_error")
|
||||
|
||||
expires_at = state.get("agent_key_expires_at")
|
||||
expires_epoch = _parse_iso_timestamp(expires_at)
|
||||
@@ -1319,7 +1321,8 @@ def resolve_nous_runtime_credentials(
|
||||
# Status helpers
|
||||
# =============================================================================
|
||||
|
||||
def get_nous_auth_status() -> Dict[str, Any]:
|
||||
|
||||
def get_nous_auth_status() -> dict[str, Any]:
|
||||
"""Status snapshot for `hermes status` output."""
|
||||
state = get_provider_auth_state("nous")
|
||||
if not state:
|
||||
@@ -1341,7 +1344,7 @@ def get_nous_auth_status() -> Dict[str, Any]:
|
||||
}
|
||||
|
||||
|
||||
def get_codex_auth_status() -> Dict[str, Any]:
|
||||
def get_codex_auth_status() -> dict[str, Any]:
|
||||
"""Status snapshot for Codex auth."""
|
||||
try:
|
||||
creds = resolve_codex_runtime_credentials()
|
||||
@@ -1360,7 +1363,7 @@ def get_codex_auth_status() -> Dict[str, Any]:
|
||||
}
|
||||
|
||||
|
||||
def get_api_key_provider_status(provider_id: str) -> Dict[str, Any]:
|
||||
def get_api_key_provider_status(provider_id: str) -> dict[str, Any]:
|
||||
"""Status snapshot for API-key providers (z.ai, Kimi, MiniMax)."""
|
||||
pconfig = PROVIDER_REGISTRY.get(provider_id)
|
||||
if not pconfig or pconfig.auth_type != "api_key":
|
||||
@@ -1396,7 +1399,7 @@ def get_api_key_provider_status(provider_id: str) -> Dict[str, Any]:
|
||||
}
|
||||
|
||||
|
||||
def get_auth_status(provider_id: Optional[str] = None) -> Dict[str, Any]:
|
||||
def get_auth_status(provider_id: str | None = None) -> dict[str, Any]:
|
||||
"""Generic auth status dispatcher."""
|
||||
target = provider_id or get_active_provider()
|
||||
if target == "nous":
|
||||
@@ -1410,7 +1413,7 @@ def get_auth_status(provider_id: Optional[str] = None) -> Dict[str, Any]:
|
||||
return {"logged_in": False}
|
||||
|
||||
|
||||
def resolve_api_key_provider_credentials(provider_id: str) -> Dict[str, Any]:
|
||||
def resolve_api_key_provider_credentials(provider_id: str) -> dict[str, Any]:
|
||||
"""Resolve API key and base URL for an API-key provider.
|
||||
|
||||
Returns dict with: provider, api_key, base_url, source.
|
||||
@@ -1455,7 +1458,8 @@ def resolve_api_key_provider_credentials(provider_id: str) -> Dict[str, Any]:
|
||||
# External credential detection
|
||||
# =============================================================================
|
||||
|
||||
def detect_external_credentials() -> List[Dict[str, Any]]:
|
||||
|
||||
def detect_external_credentials() -> list[dict[str, Any]]:
|
||||
"""Scan for credentials from other CLI tools that Hermes can reuse.
|
||||
|
||||
Returns a list of dicts, each with:
|
||||
@@ -1463,17 +1467,19 @@ def detect_external_credentials() -> List[Dict[str, Any]]:
|
||||
- path: str -- filesystem path where creds were found
|
||||
- label: str -- human-friendly description for the setup UI
|
||||
"""
|
||||
found: List[Dict[str, Any]] = []
|
||||
found: list[dict[str, Any]] = []
|
||||
|
||||
# Codex CLI: ~/.codex/auth.json (importable, not shared)
|
||||
cli_tokens = _import_codex_cli_tokens()
|
||||
if cli_tokens:
|
||||
codex_path = Path.home() / ".codex" / "auth.json"
|
||||
found.append({
|
||||
"provider": "openai-codex",
|
||||
"path": str(codex_path),
|
||||
"label": f"Codex CLI credentials found ({codex_path}) — run `hermes login` to create a separate session",
|
||||
})
|
||||
found.append(
|
||||
{
|
||||
"provider": "openai-codex",
|
||||
"path": str(codex_path),
|
||||
"label": f"Codex CLI credentials found ({codex_path}) — run `hermes login` to create a separate session",
|
||||
}
|
||||
)
|
||||
|
||||
return found
|
||||
|
||||
@@ -1482,6 +1488,7 @@ def detect_external_credentials() -> List[Dict[str, Any]]:
|
||||
# CLI Commands — login / logout
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def _update_config_for_provider(provider_id: str, inference_base_url: str) -> Path:
|
||||
"""Update config.yaml and auth.json to reflect the active provider."""
|
||||
# Set active_provider in auth.json so auto-resolution picks this provider
|
||||
@@ -1494,7 +1501,7 @@ def _update_config_for_provider(provider_id: str, inference_base_url: str) -> Pa
|
||||
config_path = get_config_path()
|
||||
config_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
config: Dict[str, Any] = {}
|
||||
config: dict[str, Any] = {}
|
||||
if config_path.exists():
|
||||
try:
|
||||
loaded = yaml.safe_load(config_path.read_text()) or {}
|
||||
@@ -1542,7 +1549,7 @@ def _reset_config_provider() -> Path:
|
||||
return config_path
|
||||
|
||||
|
||||
def _prompt_model_selection(model_ids: List[str], current_model: str = "") -> Optional[str]:
|
||||
def _prompt_model_selection(model_ids: list[str], current_model: str = "") -> str | None:
|
||||
"""Interactive model selection. Puts current_model first with a marker. Returns chosen model ID or None."""
|
||||
# Reorder: current model first, then the rest (deduplicated)
|
||||
ordered = []
|
||||
@@ -1564,6 +1571,7 @@ def _prompt_model_selection(model_ids: List[str], current_model: str = "") -> Op
|
||||
# Try arrow-key menu first, fall back to number input
|
||||
try:
|
||||
from simple_term_menu import TerminalMenu
|
||||
|
||||
choices = [f" {_label(mid)}" for mid in ordered]
|
||||
choices.append(" Enter custom model name")
|
||||
choices.append(" Skip (keep current)")
|
||||
@@ -1621,7 +1629,7 @@ def _prompt_model_selection(model_ids: List[str], current_model: str = "") -> Op
|
||||
|
||||
def _save_model_choice(model_id: str) -> None:
|
||||
"""Save the selected model to config.yaml and .env."""
|
||||
from hermes_cli.config import save_config, load_config, save_env_value
|
||||
from hermes_cli.config import load_config, save_config, save_env_value
|
||||
|
||||
config = load_config()
|
||||
# Handle both string and dict model formats
|
||||
@@ -1693,11 +1701,11 @@ def _login_openai_codex(args, pconfig: ProviderConfig) -> None:
|
||||
config_path = _update_config_for_provider("openai-codex", creds.get("base_url", DEFAULT_CODEX_BASE_URL))
|
||||
print()
|
||||
print("Login successful!")
|
||||
print(f" Auth state: ~/.hermes/auth.json")
|
||||
print(" Auth state: ~/.hermes/auth.json")
|
||||
print(f" Config updated: {config_path} (model.provider=openai-codex)")
|
||||
|
||||
|
||||
def _codex_device_code_login() -> Dict[str, Any]:
|
||||
def _codex_device_code_login() -> dict[str, Any]:
|
||||
"""Run the OpenAI device code login flow and return credentials dict."""
|
||||
import time as _time
|
||||
|
||||
@@ -1715,13 +1723,15 @@ def _codex_device_code_login() -> Dict[str, Any]:
|
||||
except Exception as exc:
|
||||
raise AuthError(
|
||||
f"Failed to request device code: {exc}",
|
||||
provider="openai-codex", code="device_code_request_failed",
|
||||
provider="openai-codex",
|
||||
code="device_code_request_failed",
|
||||
)
|
||||
|
||||
if resp.status_code != 200:
|
||||
raise AuthError(
|
||||
f"Device code request returned status {resp.status_code}.",
|
||||
provider="openai-codex", code="device_code_request_error",
|
||||
provider="openai-codex",
|
||||
code="device_code_request_error",
|
||||
)
|
||||
|
||||
device_data = resp.json()
|
||||
@@ -1732,14 +1742,15 @@ def _codex_device_code_login() -> Dict[str, Any]:
|
||||
if not user_code or not device_auth_id:
|
||||
raise AuthError(
|
||||
"Device code response missing required fields.",
|
||||
provider="openai-codex", code="device_code_incomplete",
|
||||
provider="openai-codex",
|
||||
code="device_code_incomplete",
|
||||
)
|
||||
|
||||
# Step 2: Show user the code
|
||||
print("To continue, follow these steps:\n")
|
||||
print(f" 1. Open this URL in your browser:")
|
||||
print(" 1. Open this URL in your browser:")
|
||||
print(f" \033[94m{issuer}/codex/device\033[0m\n")
|
||||
print(f" 2. Enter this code:")
|
||||
print(" 2. Enter this code:")
|
||||
print(f" \033[94m{user_code}\033[0m\n")
|
||||
print("Waiting for sign-in... (press Ctrl+C to cancel)")
|
||||
|
||||
@@ -1766,7 +1777,8 @@ def _codex_device_code_login() -> Dict[str, Any]:
|
||||
else:
|
||||
raise AuthError(
|
||||
f"Device auth polling returned status {poll_resp.status_code}.",
|
||||
provider="openai-codex", code="device_code_poll_error",
|
||||
provider="openai-codex",
|
||||
code="device_code_poll_error",
|
||||
)
|
||||
except KeyboardInterrupt:
|
||||
print("\nLogin cancelled.")
|
||||
@@ -1775,7 +1787,8 @@ def _codex_device_code_login() -> Dict[str, Any]:
|
||||
if code_resp is None:
|
||||
raise AuthError(
|
||||
"Login timed out after 15 minutes.",
|
||||
provider="openai-codex", code="device_code_timeout",
|
||||
provider="openai-codex",
|
||||
code="device_code_timeout",
|
||||
)
|
||||
|
||||
# Step 4: Exchange authorization code for tokens
|
||||
@@ -1786,7 +1799,8 @@ def _codex_device_code_login() -> Dict[str, Any]:
|
||||
if not authorization_code or not code_verifier:
|
||||
raise AuthError(
|
||||
"Device auth response missing authorization_code or code_verifier.",
|
||||
provider="openai-codex", code="device_code_incomplete_exchange",
|
||||
provider="openai-codex",
|
||||
code="device_code_incomplete_exchange",
|
||||
)
|
||||
|
||||
try:
|
||||
@@ -1805,13 +1819,15 @@ def _codex_device_code_login() -> Dict[str, Any]:
|
||||
except Exception as exc:
|
||||
raise AuthError(
|
||||
f"Token exchange failed: {exc}",
|
||||
provider="openai-codex", code="token_exchange_failed",
|
||||
provider="openai-codex",
|
||||
code="token_exchange_failed",
|
||||
)
|
||||
|
||||
if token_resp.status_code != 200:
|
||||
raise AuthError(
|
||||
f"Token exchange returned status {token_resp.status_code}.",
|
||||
provider="openai-codex", code="token_exchange_error",
|
||||
provider="openai-codex",
|
||||
code="token_exchange_error",
|
||||
)
|
||||
|
||||
tokens = token_resp.json()
|
||||
@@ -1821,14 +1837,12 @@ def _codex_device_code_login() -> Dict[str, Any]:
|
||||
if not access_token:
|
||||
raise AuthError(
|
||||
"Token exchange did not return an access_token.",
|
||||
provider="openai-codex", code="token_exchange_no_access_token",
|
||||
provider="openai-codex",
|
||||
code="token_exchange_no_access_token",
|
||||
)
|
||||
|
||||
# Return tokens for the caller to persist (no longer writes to ~/.codex/)
|
||||
base_url = (
|
||||
os.getenv("HERMES_CODEX_BASE_URL", "").strip().rstrip("/")
|
||||
or DEFAULT_CODEX_BASE_URL
|
||||
)
|
||||
base_url = os.getenv("HERMES_CODEX_BASE_URL", "").strip().rstrip("/") or DEFAULT_CODEX_BASE_URL
|
||||
|
||||
return {
|
||||
"tokens": {
|
||||
@@ -1836,7 +1850,7 @@ def _codex_device_code_login() -> Dict[str, Any]:
|
||||
"refresh_token": refresh_token,
|
||||
},
|
||||
"base_url": base_url,
|
||||
"last_refresh": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
|
||||
"last_refresh": datetime.now(UTC).isoformat().replace("+00:00", "Z"),
|
||||
"auth_mode": "chatgpt",
|
||||
"source": "device-code",
|
||||
}
|
||||
@@ -1851,9 +1865,7 @@ def _login_nous(args, pconfig: ProviderConfig) -> None:
|
||||
or pconfig.portal_base_url
|
||||
).rstrip("/")
|
||||
requested_inference_url = (
|
||||
getattr(args, "inference_url", None)
|
||||
or os.getenv("NOUS_INFERENCE_BASE_URL")
|
||||
or pconfig.inference_base_url
|
||||
getattr(args, "inference_url", None) or os.getenv("NOUS_INFERENCE_BASE_URL") or pconfig.inference_base_url
|
||||
).rstrip("/")
|
||||
client_id = getattr(args, "client_id", None) or pconfig.client_id
|
||||
scope = getattr(args, "scope", None) or pconfig.scope
|
||||
@@ -1862,11 +1874,7 @@ def _login_nous(args, pconfig: ProviderConfig) -> None:
|
||||
timeout = httpx.Timeout(timeout_seconds)
|
||||
|
||||
insecure = bool(getattr(args, "insecure", False))
|
||||
ca_bundle = (
|
||||
getattr(args, "ca_bundle", None)
|
||||
or os.getenv("HERMES_CA_BUNDLE")
|
||||
or os.getenv("SSL_CERT_FILE")
|
||||
)
|
||||
ca_bundle = getattr(args, "ca_bundle", None) or os.getenv("HERMES_CA_BUNDLE") or os.getenv("SSL_CERT_FILE")
|
||||
verify: bool | str = False if insecure else (ca_bundle if ca_bundle else True)
|
||||
|
||||
# Skip browser open in SSH sessions
|
||||
@@ -1883,8 +1891,10 @@ def _login_nous(args, pconfig: ProviderConfig) -> None:
|
||||
try:
|
||||
with httpx.Client(timeout=timeout, headers={"Accept": "application/json"}, verify=verify) as client:
|
||||
device_data = _request_device_code(
|
||||
client=client, portal_base_url=portal_base_url,
|
||||
client_id=client_id, scope=scope,
|
||||
client=client,
|
||||
portal_base_url=portal_base_url,
|
||||
client_id=client_id,
|
||||
scope=scope,
|
||||
)
|
||||
|
||||
verification_url = str(device_data["verification_uri_complete"])
|
||||
@@ -1908,19 +1918,19 @@ def _login_nous(args, pconfig: ProviderConfig) -> None:
|
||||
print(f"Waiting for approval (polling every {effective_interval}s)...")
|
||||
|
||||
token_data = _poll_for_token(
|
||||
client=client, portal_base_url=portal_base_url,
|
||||
client_id=client_id, device_code=str(device_data["device_code"]),
|
||||
expires_in=expires_in, poll_interval=interval,
|
||||
client=client,
|
||||
portal_base_url=portal_base_url,
|
||||
client_id=client_id,
|
||||
device_code=str(device_data["device_code"]),
|
||||
expires_in=expires_in,
|
||||
poll_interval=interval,
|
||||
)
|
||||
|
||||
# Process token response
|
||||
now = datetime.now(timezone.utc)
|
||||
now = datetime.now(UTC)
|
||||
token_expires_in = _coerce_ttl_seconds(token_data.get("expires_in", 0))
|
||||
expires_at = now.timestamp() + token_expires_in
|
||||
inference_base_url = (
|
||||
_optional_base_url(token_data.get("inference_base_url"))
|
||||
or requested_inference_url
|
||||
)
|
||||
inference_base_url = _optional_base_url(token_data.get("inference_base_url")) or requested_inference_url
|
||||
if inference_base_url != requested_inference_url:
|
||||
print(f"Using portal-provided inference URL: {inference_base_url}")
|
||||
|
||||
@@ -1933,7 +1943,7 @@ def _login_nous(args, pconfig: ProviderConfig) -> None:
|
||||
"access_token": token_data["access_token"],
|
||||
"refresh_token": token_data.get("refresh_token"),
|
||||
"obtained_at": now.isoformat(),
|
||||
"expires_at": datetime.fromtimestamp(expires_at, tz=timezone.utc).isoformat(),
|
||||
"expires_at": datetime.fromtimestamp(expires_at, tz=UTC).isoformat(),
|
||||
"expires_in": token_expires_in,
|
||||
"tls": {
|
||||
"insecure": verify is False,
|
||||
@@ -1964,13 +1974,13 @@ def _login_nous(args, pconfig: ProviderConfig) -> None:
|
||||
runtime_creds = resolve_nous_runtime_credentials(
|
||||
min_key_ttl_seconds=5 * 60,
|
||||
timeout_seconds=timeout_seconds,
|
||||
insecure=insecure, ca_bundle=ca_bundle,
|
||||
insecure=insecure,
|
||||
ca_bundle=ca_bundle,
|
||||
)
|
||||
runtime_key = runtime_creds.get("api_key")
|
||||
runtime_base_url = runtime_creds.get("base_url") or inference_base_url
|
||||
if not isinstance(runtime_key, str) or not runtime_key:
|
||||
raise AuthError("No runtime API key available to fetch models",
|
||||
provider="nous", code="invalid_token")
|
||||
raise AuthError("No runtime API key available to fetch models", provider="nous", code="invalid_token")
|
||||
|
||||
model_ids = fetch_nous_models(
|
||||
inference_base_url=runtime_base_url,
|
||||
|
||||
@@ -9,14 +9,12 @@ import os
|
||||
import subprocess
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Any, Optional
|
||||
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
from rich.table import Table
|
||||
|
||||
from prompt_toolkit import print_formatted_text as _pt_print
|
||||
from prompt_toolkit.formatted_text import ANSI as _PT_ANSI
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
from rich.table import Table
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -77,7 +75,8 @@ COMPACT_BANNER = """
|
||||
# Skills scanning
|
||||
# =========================================================================
|
||||
|
||||
def get_available_skills() -> Dict[str, List[str]]:
|
||||
|
||||
def get_available_skills() -> dict[str, list[str]]:
|
||||
"""Scan ~/.hermes/skills/ and return skills grouped by category."""
|
||||
import os
|
||||
|
||||
@@ -110,7 +109,7 @@ def get_available_skills() -> Dict[str, List[str]]:
|
||||
_UPDATE_CHECK_CACHE_SECONDS = 6 * 3600
|
||||
|
||||
|
||||
def check_for_updates() -> Optional[int]:
|
||||
def check_for_updates() -> int | None:
|
||||
"""Check how many commits behind origin/main the local repo is.
|
||||
|
||||
Does a ``git fetch`` at most once every 6 hours (cached to
|
||||
@@ -139,7 +138,8 @@ def check_for_updates() -> Optional[int]:
|
||||
try:
|
||||
subprocess.run(
|
||||
["git", "fetch", "origin", "--quiet"],
|
||||
capture_output=True, timeout=10,
|
||||
capture_output=True,
|
||||
timeout=10,
|
||||
cwd=str(repo_dir),
|
||||
)
|
||||
except Exception:
|
||||
@@ -149,7 +149,9 @@ def check_for_updates() -> Optional[int]:
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["git", "rev-list", "--count", "HEAD..origin/main"],
|
||||
capture_output=True, text=True, timeout=5,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=5,
|
||||
cwd=str(repo_dir),
|
||||
)
|
||||
if result.returncode == 0:
|
||||
@@ -172,6 +174,7 @@ def check_for_updates() -> Optional[int]:
|
||||
# Welcome banner
|
||||
# =========================================================================
|
||||
|
||||
|
||||
def _format_context_length(tokens: int) -> str:
|
||||
"""Format a token count for display (e.g. 128000 → '128K', 1048576 → '1M')."""
|
||||
if tokens >= 1_000_000:
|
||||
@@ -183,12 +186,16 @@ def _format_context_length(tokens: int) -> str:
|
||||
return str(tokens)
|
||||
|
||||
|
||||
def build_welcome_banner(console: Console, model: str, cwd: str,
|
||||
tools: List[dict] = None,
|
||||
enabled_toolsets: List[str] = None,
|
||||
session_id: str = None,
|
||||
get_toolset_for_tool=None,
|
||||
context_length: int = None):
|
||||
def build_welcome_banner(
|
||||
console: Console,
|
||||
model: str,
|
||||
cwd: str,
|
||||
tools: list[dict] = None,
|
||||
enabled_toolsets: list[str] = None,
|
||||
session_id: str = None,
|
||||
get_toolset_for_tool=None,
|
||||
context_length: int = None,
|
||||
):
|
||||
"""Build and print a welcome banner with caduceus on left and info on right.
|
||||
|
||||
Args:
|
||||
@@ -201,7 +208,8 @@ def build_welcome_banner(console: Console, model: str, cwd: str,
|
||||
get_toolset_for_tool: Callable to map tool name -> toolset name.
|
||||
context_length: Model's context window size in tokens.
|
||||
"""
|
||||
from model_tools import check_tool_availability, TOOLSET_REQUIREMENTS
|
||||
from model_tools import check_tool_availability
|
||||
|
||||
if get_toolset_for_tool is None:
|
||||
from model_tools import get_toolset_for_tool
|
||||
|
||||
@@ -221,7 +229,9 @@ def build_welcome_banner(console: Console, model: str, cwd: str,
|
||||
model_short = model.split("/")[-1] if "/" in model else model
|
||||
if len(model_short) > 28:
|
||||
model_short = model_short[:25] + "..."
|
||||
ctx_str = f" [dim #B8860B]·[/] [dim #B8860B]{_format_context_length(context_length)} context[/]" if context_length else ""
|
||||
ctx_str = (
|
||||
f" [dim #B8860B]·[/] [dim #B8860B]{_format_context_length(context_length)} context[/]" if context_length else ""
|
||||
)
|
||||
left_lines.append(f"[#FFBF00]{model_short}[/]{ctx_str} [dim #B8860B]·[/] [dim #B8860B]Nous Research[/]")
|
||||
left_lines.append(f"[dim #B8860B]{cwd}[/]")
|
||||
if session_id:
|
||||
@@ -229,7 +239,7 @@ def build_welcome_banner(console: Console, model: str, cwd: str,
|
||||
left_content = "\n".join(left_lines)
|
||||
|
||||
right_lines = ["[bold #FFBF00]Available Tools[/]"]
|
||||
toolsets_dict: Dict[str, list] = {}
|
||||
toolsets_dict: dict[str, list] = {}
|
||||
|
||||
for tool in tools:
|
||||
tool_name = tool["function"]["name"]
|
||||
@@ -286,6 +296,7 @@ def build_welcome_banner(console: Console, model: str, cwd: str,
|
||||
# MCP Servers section (only if configured)
|
||||
try:
|
||||
from tools.mcp_tool import get_mcp_status
|
||||
|
||||
mcp_status = get_mcp_status()
|
||||
except Exception:
|
||||
mcp_status = []
|
||||
@@ -300,10 +311,7 @@ def build_welcome_banner(console: Console, model: str, cwd: str,
|
||||
f"[dim #B8860B]—[/] [#FFF8DC]{srv['tools']} tool(s)[/]"
|
||||
)
|
||||
else:
|
||||
right_lines.append(
|
||||
f"[red]{srv['name']}[/] [dim]({srv['transport']})[/] "
|
||||
f"[red]— failed[/]"
|
||||
)
|
||||
right_lines.append(f"[red]{srv['name']}[/] [dim]({srv['transport']})[/] [red]— failed[/]")
|
||||
|
||||
right_lines.append("")
|
||||
right_lines.append("[bold #FFBF00]Available Skills[/]")
|
||||
|
||||
@@ -9,7 +9,7 @@ with the TUI.
|
||||
import queue
|
||||
import time as _time
|
||||
|
||||
from hermes_cli.banner import cprint, _DIM, _RST
|
||||
from hermes_cli.banner import _DIM, _RST, cprint
|
||||
|
||||
|
||||
def clarify_callback(cli, question, choices):
|
||||
@@ -33,7 +33,7 @@ def clarify_callback(cli, question, choices):
|
||||
cli._clarify_deadline = _time.monotonic() + timeout
|
||||
cli._clarify_freetext = is_open_ended
|
||||
|
||||
if hasattr(cli, '_app') and cli._app:
|
||||
if hasattr(cli, "_app") and cli._app:
|
||||
cli._app.invalidate()
|
||||
|
||||
while True:
|
||||
@@ -45,13 +45,13 @@ def clarify_callback(cli, question, choices):
|
||||
remaining = cli._clarify_deadline - _time.monotonic()
|
||||
if remaining <= 0:
|
||||
break
|
||||
if hasattr(cli, '_app') and cli._app:
|
||||
if hasattr(cli, "_app") and cli._app:
|
||||
cli._app.invalidate()
|
||||
|
||||
cli._clarify_state = None
|
||||
cli._clarify_freetext = False
|
||||
cli._clarify_deadline = 0
|
||||
if hasattr(cli, '_app') and cli._app:
|
||||
if hasattr(cli, "_app") and cli._app:
|
||||
cli._app.invalidate()
|
||||
cprint(f"\n{_DIM}(clarify timed out after {timeout}s — agent will decide){_RST}")
|
||||
return (
|
||||
@@ -71,7 +71,7 @@ def sudo_password_callback(cli) -> str:
|
||||
cli._sudo_state = {"response_queue": response_queue}
|
||||
cli._sudo_deadline = _time.monotonic() + timeout
|
||||
|
||||
if hasattr(cli, '_app') and cli._app:
|
||||
if hasattr(cli, "_app") and cli._app:
|
||||
cli._app.invalidate()
|
||||
|
||||
while True:
|
||||
@@ -79,7 +79,7 @@ def sudo_password_callback(cli) -> str:
|
||||
result = response_queue.get(timeout=1)
|
||||
cli._sudo_state = None
|
||||
cli._sudo_deadline = 0
|
||||
if hasattr(cli, '_app') and cli._app:
|
||||
if hasattr(cli, "_app") and cli._app:
|
||||
cli._app.invalidate()
|
||||
if result:
|
||||
cprint(f"\n{_DIM} ✓ Password received (cached for session){_RST}")
|
||||
@@ -90,12 +90,12 @@ def sudo_password_callback(cli) -> str:
|
||||
remaining = cli._sudo_deadline - _time.monotonic()
|
||||
if remaining <= 0:
|
||||
break
|
||||
if hasattr(cli, '_app') and cli._app:
|
||||
if hasattr(cli, "_app") and cli._app:
|
||||
cli._app.invalidate()
|
||||
|
||||
cli._sudo_state = None
|
||||
cli._sudo_deadline = 0
|
||||
if hasattr(cli, '_app') and cli._app:
|
||||
if hasattr(cli, "_app") and cli._app:
|
||||
cli._app.invalidate()
|
||||
cprint(f"\n{_DIM} ⏱ Timeout — continuing without sudo{_RST}")
|
||||
return ""
|
||||
@@ -119,7 +119,7 @@ def approval_callback(cli, command: str, description: str) -> str:
|
||||
}
|
||||
cli._approval_deadline = _time.monotonic() + timeout
|
||||
|
||||
if hasattr(cli, '_app') and cli._app:
|
||||
if hasattr(cli, "_app") and cli._app:
|
||||
cli._app.invalidate()
|
||||
|
||||
while True:
|
||||
@@ -127,19 +127,19 @@ def approval_callback(cli, command: str, description: str) -> str:
|
||||
result = response_queue.get(timeout=1)
|
||||
cli._approval_state = None
|
||||
cli._approval_deadline = 0
|
||||
if hasattr(cli, '_app') and cli._app:
|
||||
if hasattr(cli, "_app") and cli._app:
|
||||
cli._app.invalidate()
|
||||
return result
|
||||
except queue.Empty:
|
||||
remaining = cli._approval_deadline - _time.monotonic()
|
||||
if remaining <= 0:
|
||||
break
|
||||
if hasattr(cli, '_app') and cli._app:
|
||||
if hasattr(cli, "_app") and cli._app:
|
||||
cli._app.invalidate()
|
||||
|
||||
cli._approval_state = None
|
||||
cli._approval_deadline = 0
|
||||
if hasattr(cli, '_app') and cli._app:
|
||||
if hasattr(cli, "_app") and cli._app:
|
||||
cli._app.invalidate()
|
||||
cprint(f"\n{_DIM} ⏱ Timeout — denying command{_RST}")
|
||||
return "deny"
|
||||
|
||||
@@ -51,6 +51,7 @@ def has_clipboard_image() -> bool:
|
||||
|
||||
# ── macOS ────────────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def _macos_save(dest: Path) -> bool:
|
||||
"""Try pngpaste first (fast, handles more formats), fall back to osascript."""
|
||||
return _macos_pngpaste(dest) or _macos_osascript(dest)
|
||||
@@ -61,7 +62,9 @@ def _macos_has_image() -> bool:
|
||||
try:
|
||||
info = subprocess.run(
|
||||
["osascript", "-e", "clipboard info"],
|
||||
capture_output=True, text=True, timeout=3,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=3,
|
||||
)
|
||||
return "«class PNGf»" in info.stdout or "«class TIFF»" in info.stdout
|
||||
except Exception:
|
||||
@@ -73,7 +76,8 @@ def _macos_pngpaste(dest: Path) -> bool:
|
||||
try:
|
||||
r = subprocess.run(
|
||||
["pngpaste", str(dest)],
|
||||
capture_output=True, timeout=3,
|
||||
capture_output=True,
|
||||
timeout=3,
|
||||
)
|
||||
if r.returncode == 0 and dest.exists() and dest.stat().st_size > 0:
|
||||
return True
|
||||
@@ -91,19 +95,21 @@ def _macos_osascript(dest: Path) -> bool:
|
||||
|
||||
# Extract as PNG
|
||||
script = (
|
||||
'try\n'
|
||||
' set imgData to the clipboard as «class PNGf»\n'
|
||||
"try\n"
|
||||
" set imgData to the clipboard as «class PNGf»\n"
|
||||
f' set f to open for access POSIX file "{dest}" with write permission\n'
|
||||
' write imgData to f\n'
|
||||
' close access f\n'
|
||||
'on error\n'
|
||||
" write imgData to f\n"
|
||||
" close access f\n"
|
||||
"on error\n"
|
||||
' return "fail"\n'
|
||||
'end try\n'
|
||||
"end try\n"
|
||||
)
|
||||
try:
|
||||
r = subprocess.run(
|
||||
["osascript", "-e", script],
|
||||
capture_output=True, text=True, timeout=5,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=5,
|
||||
)
|
||||
if r.returncode == 0 and "fail" not in r.stdout and dest.exists() and dest.stat().st_size > 0:
|
||||
return True
|
||||
@@ -114,13 +120,14 @@ def _macos_osascript(dest: Path) -> bool:
|
||||
|
||||
# ── Linux ────────────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def _is_wsl() -> bool:
|
||||
"""Detect if running inside WSL (1 or 2)."""
|
||||
global _wsl_detected
|
||||
if _wsl_detected is not None:
|
||||
return _wsl_detected
|
||||
try:
|
||||
with open("/proc/version", "r") as f:
|
||||
with open("/proc/version") as f:
|
||||
_wsl_detected = "microsoft" in f.read().lower()
|
||||
except Exception:
|
||||
_wsl_detected = False
|
||||
@@ -145,10 +152,7 @@ def _linux_save(dest: Path) -> bool:
|
||||
|
||||
# PowerShell script: get clipboard image as base64-encoded PNG on stdout.
|
||||
# Using .NET System.Windows.Forms.Clipboard — always available on Windows.
|
||||
_PS_CHECK_IMAGE = (
|
||||
"Add-Type -AssemblyName System.Windows.Forms;"
|
||||
"[System.Windows.Forms.Clipboard]::ContainsImage()"
|
||||
)
|
||||
_PS_CHECK_IMAGE = "Add-Type -AssemblyName System.Windows.Forms;[System.Windows.Forms.Clipboard]::ContainsImage()"
|
||||
|
||||
_PS_EXTRACT_IMAGE = (
|
||||
"Add-Type -AssemblyName System.Windows.Forms;"
|
||||
@@ -165,9 +169,10 @@ def _wsl_has_image() -> bool:
|
||||
"""Check if Windows clipboard has an image (via powershell.exe)."""
|
||||
try:
|
||||
r = subprocess.run(
|
||||
["powershell.exe", "-NoProfile", "-NonInteractive", "-Command",
|
||||
_PS_CHECK_IMAGE],
|
||||
capture_output=True, text=True, timeout=8,
|
||||
["powershell.exe", "-NoProfile", "-NonInteractive", "-Command", _PS_CHECK_IMAGE],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=8,
|
||||
)
|
||||
return r.returncode == 0 and "True" in r.stdout
|
||||
except FileNotFoundError:
|
||||
@@ -181,9 +186,10 @@ def _wsl_save(dest: Path) -> bool:
|
||||
"""Extract clipboard image via powershell.exe → base64 → decode to PNG."""
|
||||
try:
|
||||
r = subprocess.run(
|
||||
["powershell.exe", "-NoProfile", "-NonInteractive", "-Command",
|
||||
_PS_EXTRACT_IMAGE],
|
||||
capture_output=True, text=True, timeout=15,
|
||||
["powershell.exe", "-NoProfile", "-NonInteractive", "-Command", _PS_EXTRACT_IMAGE],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=15,
|
||||
)
|
||||
if r.returncode != 0:
|
||||
return False
|
||||
@@ -206,16 +212,17 @@ def _wsl_save(dest: Path) -> bool:
|
||||
|
||||
# ── Wayland (wl-paste) ──────────────────────────────────────────────────
|
||||
|
||||
|
||||
def _wayland_has_image() -> bool:
|
||||
"""Check if Wayland clipboard has image content."""
|
||||
try:
|
||||
r = subprocess.run(
|
||||
["wl-paste", "--list-types"],
|
||||
capture_output=True, text=True, timeout=3,
|
||||
)
|
||||
return r.returncode == 0 and any(
|
||||
t.startswith("image/") for t in r.stdout.splitlines()
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=3,
|
||||
)
|
||||
return r.returncode == 0 and any(t.startswith("image/") for t in r.stdout.splitlines())
|
||||
except FileNotFoundError:
|
||||
logger.debug("wl-paste not installed — Wayland clipboard unavailable")
|
||||
except Exception:
|
||||
@@ -229,7 +236,9 @@ def _wayland_save(dest: Path) -> bool:
|
||||
# Check available MIME types
|
||||
types_r = subprocess.run(
|
||||
["wl-paste", "--list-types"],
|
||||
capture_output=True, text=True, timeout=3,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=3,
|
||||
)
|
||||
if types_r.returncode != 0:
|
||||
return False
|
||||
@@ -237,8 +246,7 @@ def _wayland_save(dest: Path) -> bool:
|
||||
|
||||
# Prefer PNG, fall back to other image formats
|
||||
mime = None
|
||||
for preferred in ("image/png", "image/jpeg", "image/bmp",
|
||||
"image/gif", "image/webp"):
|
||||
for preferred in ("image/png", "image/jpeg", "image/bmp", "image/gif", "image/webp"):
|
||||
if preferred in types:
|
||||
mime = preferred
|
||||
break
|
||||
@@ -250,7 +258,10 @@ def _wayland_save(dest: Path) -> bool:
|
||||
with open(dest, "wb") as f:
|
||||
subprocess.run(
|
||||
["wl-paste", "--type", mime],
|
||||
stdout=f, stderr=subprocess.DEVNULL, timeout=5, check=True,
|
||||
stdout=f,
|
||||
stderr=subprocess.DEVNULL,
|
||||
timeout=5,
|
||||
check=True,
|
||||
)
|
||||
|
||||
if not dest.exists() or dest.stat().st_size == 0:
|
||||
@@ -276,6 +287,7 @@ def _convert_to_png(path: Path) -> bool:
|
||||
# Try Pillow first (likely installed in the venv)
|
||||
try:
|
||||
from PIL import Image
|
||||
|
||||
img = Image.open(path)
|
||||
img.save(path, "PNG")
|
||||
return True
|
||||
@@ -290,7 +302,8 @@ def _convert_to_png(path: Path) -> bool:
|
||||
path.rename(tmp)
|
||||
r = subprocess.run(
|
||||
["convert", str(tmp), "png:" + str(path)],
|
||||
capture_output=True, timeout=5,
|
||||
capture_output=True,
|
||||
timeout=5,
|
||||
)
|
||||
tmp.unlink(missing_ok=True)
|
||||
if r.returncode == 0 and path.exists() and path.stat().st_size > 0:
|
||||
@@ -310,12 +323,15 @@ def _convert_to_png(path: Path) -> bool:
|
||||
|
||||
# ── X11 (xclip) ─────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def _xclip_has_image() -> bool:
|
||||
"""Check if X11 clipboard has image content."""
|
||||
try:
|
||||
r = subprocess.run(
|
||||
["xclip", "-selection", "clipboard", "-t", "TARGETS", "-o"],
|
||||
capture_output=True, text=True, timeout=3,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=3,
|
||||
)
|
||||
return r.returncode == 0 and "image/png" in r.stdout
|
||||
except FileNotFoundError:
|
||||
@@ -331,7 +347,9 @@ def _xclip_save(dest: Path) -> bool:
|
||||
try:
|
||||
targets = subprocess.run(
|
||||
["xclip", "-selection", "clipboard", "-t", "TARGETS", "-o"],
|
||||
capture_output=True, text=True, timeout=3,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=3,
|
||||
)
|
||||
if "image/png" not in targets.stdout:
|
||||
return False
|
||||
@@ -346,7 +364,10 @@ def _xclip_save(dest: Path) -> bool:
|
||||
with open(dest, "wb") as f:
|
||||
subprocess.run(
|
||||
["xclip", "-selection", "clipboard", "-t", "image/png", "-o"],
|
||||
stdout=f, stderr=subprocess.DEVNULL, timeout=5, check=True,
|
||||
stdout=f,
|
||||
stderr=subprocess.DEVNULL,
|
||||
timeout=5,
|
||||
check=True,
|
||||
)
|
||||
if dest.exists() and dest.stat().st_size > 0:
|
||||
return True
|
||||
|
||||
@@ -4,14 +4,12 @@ from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import List, Optional
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DEFAULT_CODEX_MODELS: List[str] = [
|
||||
DEFAULT_CODEX_MODELS: list[str] = [
|
||||
"gpt-5.3-codex",
|
||||
"gpt-5.2-codex",
|
||||
"gpt-5.1-codex-max",
|
||||
@@ -19,10 +17,11 @@ DEFAULT_CODEX_MODELS: List[str] = [
|
||||
]
|
||||
|
||||
|
||||
def _fetch_models_from_api(access_token: str) -> List[str]:
|
||||
def _fetch_models_from_api(access_token: str) -> list[str]:
|
||||
"""Fetch available models from the Codex API. Returns visible models sorted by priority."""
|
||||
try:
|
||||
import httpx
|
||||
|
||||
resp = httpx.get(
|
||||
"https://chatgpt.com/backend-api/codex/models?client_version=1.0.0",
|
||||
headers={"Authorization": f"Bearer {access_token}"},
|
||||
@@ -47,7 +46,7 @@ def _fetch_models_from_api(access_token: str) -> List[str]:
|
||||
if item.get("supported_in_api") is False:
|
||||
continue
|
||||
visibility = item.get("visibility", "")
|
||||
if isinstance(visibility, str) and visibility.strip().lower() == "hide":
|
||||
if isinstance(visibility, str) and visibility.strip().lower() == "hidden":
|
||||
continue
|
||||
priority = item.get("priority")
|
||||
rank = int(priority) if isinstance(priority, (int, float)) else 10_000
|
||||
@@ -57,7 +56,7 @@ def _fetch_models_from_api(access_token: str) -> List[str]:
|
||||
return [slug for _, slug in sortable]
|
||||
|
||||
|
||||
def _read_default_model(codex_home: Path) -> Optional[str]:
|
||||
def _read_default_model(codex_home: Path) -> str | None:
|
||||
config_path = codex_home / "config.toml"
|
||||
if not config_path.exists():
|
||||
return None
|
||||
@@ -75,7 +74,7 @@ def _read_default_model(codex_home: Path) -> Optional[str]:
|
||||
return None
|
||||
|
||||
|
||||
def _read_cache_models(codex_home: Path) -> List[str]:
|
||||
def _read_cache_models(codex_home: Path) -> list[str]:
|
||||
cache_path = codex_home / "models_cache.json"
|
||||
if not cache_path.exists():
|
||||
return []
|
||||
@@ -104,22 +103,22 @@ def _read_cache_models(codex_home: Path) -> List[str]:
|
||||
sortable.append((rank, slug))
|
||||
|
||||
sortable.sort(key=lambda item: (item[0], item[1]))
|
||||
deduped: List[str] = []
|
||||
deduped: list[str] = []
|
||||
for _, slug in sortable:
|
||||
if slug not in deduped:
|
||||
deduped.append(slug)
|
||||
return deduped
|
||||
|
||||
|
||||
def get_codex_model_ids(access_token: Optional[str] = None) -> List[str]:
|
||||
def get_codex_model_ids(access_token: str | None = None) -> list[str]:
|
||||
"""Return available Codex model IDs, trying API first, then local sources.
|
||||
|
||||
|
||||
Resolution order: API (live, if token provided) > config.toml default >
|
||||
local cache > hardcoded defaults.
|
||||
"""
|
||||
codex_home_str = os.getenv("CODEX_HOME", "").strip() or str(Path.home() / ".codex")
|
||||
codex_home = Path(codex_home_str).expanduser()
|
||||
ordered: List[str] = []
|
||||
ordered: list[str] = []
|
||||
|
||||
# Try live API if we have a token
|
||||
if access_token:
|
||||
|
||||
@@ -12,7 +12,6 @@ from typing import Any
|
||||
|
||||
from prompt_toolkit.completion import Completer, Completion
|
||||
|
||||
|
||||
COMMANDS = {
|
||||
"/help": "Show this help message",
|
||||
"/tools": "List available tools",
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -20,46 +20,46 @@ from hermes_cli.colors import Colors, color
|
||||
def cron_list(show_all: bool = False):
|
||||
"""List all scheduled jobs."""
|
||||
from cron.jobs import list_jobs
|
||||
|
||||
|
||||
jobs = list_jobs(include_disabled=show_all)
|
||||
|
||||
|
||||
if not jobs:
|
||||
print(color("No scheduled jobs.", Colors.DIM))
|
||||
print(color("Create one with the /cron add command in chat, or via Telegram.", Colors.DIM))
|
||||
return
|
||||
|
||||
|
||||
print()
|
||||
print(color("┌─────────────────────────────────────────────────────────────────────────┐", Colors.CYAN))
|
||||
print(color("│ Scheduled Jobs │", Colors.CYAN))
|
||||
print(color("└─────────────────────────────────────────────────────────────────────────┘", Colors.CYAN))
|
||||
print()
|
||||
|
||||
|
||||
for job in jobs:
|
||||
job_id = job.get("id", "?")[:8]
|
||||
name = job.get("name", "(unnamed)")
|
||||
schedule = job.get("schedule_display", job.get("schedule", {}).get("value", "?"))
|
||||
enabled = job.get("enabled", True)
|
||||
next_run = job.get("next_run_at", "?")
|
||||
|
||||
|
||||
repeat_info = job.get("repeat", {})
|
||||
repeat_times = repeat_info.get("times")
|
||||
repeat_completed = repeat_info.get("completed", 0)
|
||||
|
||||
|
||||
if repeat_times:
|
||||
repeat_str = f"{repeat_completed}/{repeat_times}"
|
||||
else:
|
||||
repeat_str = "∞"
|
||||
|
||||
|
||||
deliver = job.get("deliver", ["local"])
|
||||
if isinstance(deliver, str):
|
||||
deliver = [deliver]
|
||||
deliver_str = ", ".join(deliver)
|
||||
|
||||
|
||||
if not enabled:
|
||||
status = color("[disabled]", Colors.RED)
|
||||
else:
|
||||
status = color("[active]", Colors.GREEN)
|
||||
|
||||
|
||||
print(f" {color(job_id, Colors.YELLOW)} {status}")
|
||||
print(f" Name: {name}")
|
||||
print(f" Schedule: {schedule}")
|
||||
@@ -67,9 +67,10 @@ def cron_list(show_all: bool = False):
|
||||
print(f" Next run: {next_run}")
|
||||
print(f" Deliver: {deliver_str}")
|
||||
print()
|
||||
|
||||
|
||||
# Warn if gateway isn't running
|
||||
from hermes_cli.gateway import find_gateway_pids
|
||||
|
||||
if not find_gateway_pids():
|
||||
print(color(" ⚠ Gateway is not running — jobs won't fire automatically.", Colors.YELLOW))
|
||||
print(color(" Start it with: hermes gateway install", Colors.DIM))
|
||||
@@ -79,6 +80,7 @@ def cron_list(show_all: bool = False):
|
||||
def cron_tick():
|
||||
"""Run due jobs once and exit."""
|
||||
from cron.scheduler import tick
|
||||
|
||||
tick(verbose=True)
|
||||
|
||||
|
||||
@@ -86,9 +88,9 @@ def cron_status():
|
||||
"""Show cron execution status."""
|
||||
from cron.jobs import list_jobs
|
||||
from hermes_cli.gateway import find_gateway_pids
|
||||
|
||||
|
||||
print()
|
||||
|
||||
|
||||
pids = find_gateway_pids()
|
||||
if pids:
|
||||
print(color("✓ Gateway is running — cron jobs will fire automatically", Colors.GREEN))
|
||||
@@ -99,9 +101,9 @@ def cron_status():
|
||||
print(" To enable automatic execution:")
|
||||
print(" hermes gateway install # Install as system service (recommended)")
|
||||
print(" hermes gateway # Or run in foreground")
|
||||
|
||||
|
||||
print()
|
||||
|
||||
|
||||
jobs = list_jobs(include_disabled=False)
|
||||
if jobs:
|
||||
next_runs = [j.get("next_run_at") for j in jobs if j.get("next_run_at")]
|
||||
@@ -110,24 +112,24 @@ def cron_status():
|
||||
print(f" Next run: {min(next_runs)}")
|
||||
else:
|
||||
print(" No active jobs")
|
||||
|
||||
|
||||
print()
|
||||
|
||||
|
||||
def cron_command(args):
|
||||
"""Handle cron subcommands."""
|
||||
subcmd = getattr(args, 'cron_command', None)
|
||||
|
||||
subcmd = getattr(args, "cron_command", None)
|
||||
|
||||
if subcmd is None or subcmd == "list":
|
||||
show_all = getattr(args, 'all', False)
|
||||
show_all = getattr(args, "all", False)
|
||||
cron_list(show_all)
|
||||
|
||||
|
||||
elif subcmd == "tick":
|
||||
cron_tick()
|
||||
|
||||
|
||||
elif subcmd == "status":
|
||||
cron_status()
|
||||
|
||||
|
||||
else:
|
||||
print(f"Unknown cron command: {subcmd}")
|
||||
print("Usage: hermes cron [list|status|tick]")
|
||||
|
||||
@@ -5,18 +5,18 @@ Diagnoses issues with Hermes Agent setup.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from hermes_cli.config import get_project_root, get_hermes_home, get_env_path
|
||||
from hermes_cli.config import get_env_path, get_hermes_home, get_project_root
|
||||
|
||||
PROJECT_ROOT = get_project_root()
|
||||
HERMES_HOME = get_hermes_home()
|
||||
|
||||
# Load environment variables from ~/.hermes/.env so API key checks work
|
||||
from dotenv import load_dotenv
|
||||
|
||||
_env_path = get_env_path()
|
||||
if _env_path.exists():
|
||||
try:
|
||||
@@ -33,7 +33,6 @@ os.environ.setdefault("MSWEA_SILENT_STARTUP", "1")
|
||||
from hermes_cli.colors import Colors, color
|
||||
from hermes_constants import OPENROUTER_MODELS_URL
|
||||
|
||||
|
||||
_PROVIDER_ENV_HINTS = (
|
||||
"OPENROUTER_API_KEY",
|
||||
"OPENAI_API_KEY",
|
||||
@@ -56,35 +55,38 @@ def _has_provider_env_config(content: str) -> bool:
|
||||
def check_ok(text: str, detail: str = ""):
|
||||
print(f" {color('✓', Colors.GREEN)} {text}" + (f" {color(detail, Colors.DIM)}" if detail else ""))
|
||||
|
||||
|
||||
def check_warn(text: str, detail: str = ""):
|
||||
print(f" {color('⚠', Colors.YELLOW)} {text}" + (f" {color(detail, Colors.DIM)}" if detail else ""))
|
||||
|
||||
|
||||
def check_fail(text: str, detail: str = ""):
|
||||
print(f" {color('✗', Colors.RED)} {text}" + (f" {color(detail, Colors.DIM)}" if detail else ""))
|
||||
|
||||
|
||||
def check_info(text: str):
|
||||
print(f" {color('→', Colors.CYAN)} {text}")
|
||||
|
||||
|
||||
def run_doctor(args):
|
||||
"""Run diagnostic checks."""
|
||||
should_fix = getattr(args, 'fix', False)
|
||||
|
||||
should_fix = getattr(args, "fix", False)
|
||||
|
||||
issues = []
|
||||
manual_issues = [] # issues that can't be auto-fixed
|
||||
fixed_count = 0
|
||||
|
||||
|
||||
print()
|
||||
print(color("┌─────────────────────────────────────────────────────────┐", Colors.CYAN))
|
||||
print(color("│ 🩺 Hermes Doctor │", Colors.CYAN))
|
||||
print(color("└─────────────────────────────────────────────────────────┘", Colors.CYAN))
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Check: Python version
|
||||
# =========================================================================
|
||||
print()
|
||||
print(color("◆ Python Environment", Colors.CYAN, Colors.BOLD))
|
||||
|
||||
|
||||
py_version = sys.version_info
|
||||
if py_version >= (3, 11):
|
||||
check_ok(f"Python {py_version.major}.{py_version.minor}.{py_version.micro}")
|
||||
@@ -96,20 +98,20 @@ def run_doctor(args):
|
||||
else:
|
||||
check_fail(f"Python {py_version.major}.{py_version.minor}.{py_version.micro}", "(3.10+ required)")
|
||||
issues.append("Upgrade Python to 3.10+")
|
||||
|
||||
|
||||
# Check if in virtual environment
|
||||
in_venv = sys.prefix != sys.base_prefix
|
||||
if in_venv:
|
||||
check_ok("Virtual environment active")
|
||||
else:
|
||||
check_warn("Not in virtual environment", "(recommended)")
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Check: Required packages
|
||||
# =========================================================================
|
||||
print()
|
||||
print(color("◆ Required Packages", Colors.CYAN, Colors.BOLD))
|
||||
|
||||
|
||||
required_packages = [
|
||||
("openai", "OpenAI SDK"),
|
||||
("rich", "Rich (terminal UI)"),
|
||||
@@ -117,13 +119,13 @@ def run_doctor(args):
|
||||
("yaml", "PyYAML"),
|
||||
("httpx", "HTTPX"),
|
||||
]
|
||||
|
||||
|
||||
optional_packages = [
|
||||
("croniter", "Croniter (cron expressions)"),
|
||||
("telegram", "python-telegram-bot"),
|
||||
("discord", "discord.py"),
|
||||
]
|
||||
|
||||
|
||||
for module, name in required_packages:
|
||||
try:
|
||||
__import__(module)
|
||||
@@ -131,25 +133,25 @@ def run_doctor(args):
|
||||
except ImportError:
|
||||
check_fail(name, "(missing)")
|
||||
issues.append(f"Install {name}: uv pip install {module}")
|
||||
|
||||
|
||||
for module, name in optional_packages:
|
||||
try:
|
||||
__import__(module)
|
||||
check_ok(name, "(optional)")
|
||||
except ImportError:
|
||||
check_warn(name, "(optional, not installed)")
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Check: Configuration files
|
||||
# =========================================================================
|
||||
print()
|
||||
print(color("◆ Configuration Files", Colors.CYAN, Colors.BOLD))
|
||||
|
||||
|
||||
# Check ~/.hermes/.env (primary location for user config)
|
||||
env_path = HERMES_HOME / '.env'
|
||||
env_path = HERMES_HOME / ".env"
|
||||
if env_path.exists():
|
||||
check_ok("~/.hermes/.env file exists")
|
||||
|
||||
|
||||
# Check for common issues
|
||||
content = env_path.read_text()
|
||||
if _has_provider_env_config(content):
|
||||
@@ -159,7 +161,7 @@ def run_doctor(args):
|
||||
issues.append("Run 'hermes setup' to configure API keys")
|
||||
else:
|
||||
# Also check project root as fallback
|
||||
fallback_env = PROJECT_ROOT / '.env'
|
||||
fallback_env = PROJECT_ROOT / ".env"
|
||||
if fallback_env.exists():
|
||||
check_ok(".env file exists (in project directory)")
|
||||
else:
|
||||
@@ -173,17 +175,17 @@ def run_doctor(args):
|
||||
else:
|
||||
check_info("Run 'hermes setup' to create one")
|
||||
issues.append("Run 'hermes setup' to create .env")
|
||||
|
||||
|
||||
# Check ~/.hermes/config.yaml (primary) or project cli-config.yaml (fallback)
|
||||
config_path = HERMES_HOME / 'config.yaml'
|
||||
config_path = HERMES_HOME / "config.yaml"
|
||||
if config_path.exists():
|
||||
check_ok("~/.hermes/config.yaml exists")
|
||||
else:
|
||||
fallback_config = PROJECT_ROOT / 'cli-config.yaml'
|
||||
fallback_config = PROJECT_ROOT / "cli-config.yaml"
|
||||
if fallback_config.exists():
|
||||
check_ok("cli-config.yaml exists (in project directory)")
|
||||
else:
|
||||
example_config = PROJECT_ROOT / 'cli-config.yaml.example'
|
||||
example_config = PROJECT_ROOT / "cli-config.yaml.example"
|
||||
if should_fix and example_config.exists():
|
||||
config_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
shutil.copy2(str(example_config), str(config_path))
|
||||
@@ -194,7 +196,7 @@ def run_doctor(args):
|
||||
manual_issues.append("Create ~/.hermes/config.yaml manually")
|
||||
else:
|
||||
check_warn("config.yaml not found", "(using defaults)")
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Check: Auth providers
|
||||
# =========================================================================
|
||||
@@ -202,7 +204,7 @@ def run_doctor(args):
|
||||
print(color("◆ Auth Providers", Colors.CYAN, Colors.BOLD))
|
||||
|
||||
try:
|
||||
from hermes_cli.auth import get_nous_auth_status, get_codex_auth_status
|
||||
from hermes_cli.auth import get_codex_auth_status, get_nous_auth_status
|
||||
|
||||
nous_status = get_nous_auth_status()
|
||||
if nous_status.get("logged_in"):
|
||||
@@ -230,7 +232,7 @@ def run_doctor(args):
|
||||
# =========================================================================
|
||||
print()
|
||||
print(color("◆ Directory Structure", Colors.CYAN, Colors.BOLD))
|
||||
|
||||
|
||||
hermes_home = HERMES_HOME
|
||||
if hermes_home.exists():
|
||||
check_ok("~/.hermes directory exists")
|
||||
@@ -241,7 +243,7 @@ def run_doctor(args):
|
||||
fixed_count += 1
|
||||
else:
|
||||
check_warn("~/.hermes not found", "(will be created on first use)")
|
||||
|
||||
|
||||
# Check expected subdirectories
|
||||
expected_subdirs = ["cron", "sessions", "logs", "skills", "memories"]
|
||||
for subdir_name in expected_subdirs:
|
||||
@@ -255,7 +257,7 @@ def run_doctor(args):
|
||||
fixed_count += 1
|
||||
else:
|
||||
check_warn(f"~/.hermes/{subdir_name}/ not found", "(will be created on first use)")
|
||||
|
||||
|
||||
# Check for SOUL.md persona file
|
||||
soul_path = hermes_home / "SOUL.md"
|
||||
if soul_path.exists():
|
||||
@@ -278,7 +280,7 @@ def run_doctor(args):
|
||||
)
|
||||
check_ok("Created ~/.hermes/SOUL.md with basic template")
|
||||
fixed_count += 1
|
||||
|
||||
|
||||
# Check memory directory
|
||||
memories_dir = hermes_home / "memories"
|
||||
if memories_dir.exists():
|
||||
@@ -301,12 +303,13 @@ def run_doctor(args):
|
||||
memories_dir.mkdir(parents=True, exist_ok=True)
|
||||
check_ok("Created ~/.hermes/memories/")
|
||||
fixed_count += 1
|
||||
|
||||
|
||||
# Check SQLite session store
|
||||
state_db_path = hermes_home / "state.db"
|
||||
if state_db_path.exists():
|
||||
try:
|
||||
import sqlite3
|
||||
|
||||
conn = sqlite3.connect(str(state_db_path))
|
||||
cursor = conn.execute("SELECT COUNT(*) FROM sessions")
|
||||
count = cursor.fetchone()[0]
|
||||
@@ -316,26 +319,26 @@ def run_doctor(args):
|
||||
check_warn(f"~/.hermes/state.db exists but has issues: {e}")
|
||||
else:
|
||||
check_info("~/.hermes/state.db not created yet (will be created on first session)")
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Check: External tools
|
||||
# =========================================================================
|
||||
print()
|
||||
print(color("◆ External Tools", Colors.CYAN, Colors.BOLD))
|
||||
|
||||
|
||||
# Git
|
||||
if shutil.which("git"):
|
||||
check_ok("git")
|
||||
else:
|
||||
check_warn("git not found", "(optional)")
|
||||
|
||||
|
||||
# ripgrep (optional, for faster file search)
|
||||
if shutil.which("rg"):
|
||||
check_ok("ripgrep (rg)", "(faster file search)")
|
||||
else:
|
||||
check_warn("ripgrep (rg) not found", "(file search uses grep fallback)")
|
||||
check_info("Install for faster search: sudo apt install ripgrep")
|
||||
|
||||
|
||||
# Docker (optional)
|
||||
terminal_env = os.getenv("TERMINAL_ENV", "local")
|
||||
if terminal_env == "docker":
|
||||
@@ -355,7 +358,7 @@ def run_doctor(args):
|
||||
check_ok("docker", "(optional)")
|
||||
else:
|
||||
check_warn("docker not found", "(optional)")
|
||||
|
||||
|
||||
# SSH (if using ssh backend)
|
||||
if terminal_env == "ssh":
|
||||
ssh_host = os.getenv("TERMINAL_SSH_HOST")
|
||||
@@ -364,7 +367,7 @@ def run_doctor(args):
|
||||
result = subprocess.run(
|
||||
["ssh", "-o", "ConnectTimeout=5", "-o", "BatchMode=yes", ssh_host, "echo ok"],
|
||||
capture_output=True,
|
||||
text=True
|
||||
text=True,
|
||||
)
|
||||
if result.returncode == 0:
|
||||
check_ok(f"SSH connection to {ssh_host}")
|
||||
@@ -374,7 +377,7 @@ def run_doctor(args):
|
||||
else:
|
||||
check_fail("TERMINAL_SSH_HOST not set", "(required for TERMINAL_ENV=ssh)")
|
||||
issues.append("Set TERMINAL_SSH_HOST in .env")
|
||||
|
||||
|
||||
# Daytona (if using daytona backend)
|
||||
if terminal_env == "daytona":
|
||||
daytona_key = os.getenv("DAYTONA_API_KEY")
|
||||
@@ -385,6 +388,7 @@ def run_doctor(args):
|
||||
issues.append("Set DAYTONA_API_KEY environment variable")
|
||||
try:
|
||||
from daytona import Daytona
|
||||
|
||||
check_ok("daytona SDK", "(installed)")
|
||||
except ImportError:
|
||||
check_fail("daytona SDK not installed", "(pip install daytona)")
|
||||
@@ -401,7 +405,7 @@ def run_doctor(args):
|
||||
check_warn("agent-browser not installed", "(run: npm install)")
|
||||
else:
|
||||
check_warn("Node.js not found", "(optional, needed for browser tools)")
|
||||
|
||||
|
||||
# npm audit for all Node.js packages
|
||||
if shutil.which("npm"):
|
||||
npm_dirs = [
|
||||
@@ -415,9 +419,12 @@ def run_doctor(args):
|
||||
audit_result = subprocess.run(
|
||||
["npm", "audit", "--json"],
|
||||
cwd=str(npm_dir),
|
||||
capture_output=True, text=True, timeout=30,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=30,
|
||||
)
|
||||
import json as _json
|
||||
|
||||
audit_data = _json.loads(audit_result.stdout) if audit_result.stdout.strip() else {}
|
||||
vuln_count = audit_data.get("metadata", {}).get("vulnerabilities", {})
|
||||
critical = vuln_count.get("critical", 0)
|
||||
@@ -429,7 +436,7 @@ def run_doctor(args):
|
||||
elif critical > 0 or high > 0:
|
||||
check_warn(
|
||||
f"{label} deps",
|
||||
f"({critical} critical, {high} high, {moderate} moderate — run: cd {npm_dir} && npm audit fix)"
|
||||
f"({critical} critical, {high} high, {moderate} moderate — run: cd {npm_dir} && npm audit fix)",
|
||||
)
|
||||
issues.append(f"{label} has {total} npm vulnerability(ies)")
|
||||
else:
|
||||
@@ -442,47 +449,50 @@ def run_doctor(args):
|
||||
# =========================================================================
|
||||
print()
|
||||
print(color("◆ API Connectivity", Colors.CYAN, Colors.BOLD))
|
||||
|
||||
|
||||
openrouter_key = os.getenv("OPENROUTER_API_KEY")
|
||||
if openrouter_key:
|
||||
print(" Checking OpenRouter API...", end="", flush=True)
|
||||
try:
|
||||
import httpx
|
||||
|
||||
response = httpx.get(
|
||||
OPENROUTER_MODELS_URL,
|
||||
headers={"Authorization": f"Bearer {openrouter_key}"},
|
||||
timeout=10
|
||||
OPENROUTER_MODELS_URL, headers={"Authorization": f"Bearer {openrouter_key}"}, timeout=10
|
||||
)
|
||||
if response.status_code == 200:
|
||||
print(f"\r {color('✓', Colors.GREEN)} OpenRouter API ")
|
||||
elif response.status_code == 401:
|
||||
print(f"\r {color('✗', Colors.RED)} OpenRouter API {color('(invalid API key)', Colors.DIM)} ")
|
||||
print(
|
||||
f"\r {color('✗', Colors.RED)} OpenRouter API {color('(invalid API key)', Colors.DIM)} "
|
||||
)
|
||||
issues.append("Check OPENROUTER_API_KEY in .env")
|
||||
else:
|
||||
print(f"\r {color('✗', Colors.RED)} OpenRouter API {color(f'(HTTP {response.status_code})', Colors.DIM)} ")
|
||||
print(
|
||||
f"\r {color('✗', Colors.RED)} OpenRouter API {color(f'(HTTP {response.status_code})', Colors.DIM)} "
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"\r {color('✗', Colors.RED)} OpenRouter API {color(f'({e})', Colors.DIM)} ")
|
||||
issues.append("Check network connectivity")
|
||||
else:
|
||||
check_warn("OpenRouter API", "(not configured)")
|
||||
|
||||
|
||||
anthropic_key = os.getenv("ANTHROPIC_API_KEY")
|
||||
if anthropic_key:
|
||||
print(" Checking Anthropic API...", end="", flush=True)
|
||||
try:
|
||||
import httpx
|
||||
|
||||
response = httpx.get(
|
||||
"https://api.anthropic.com/v1/models",
|
||||
headers={
|
||||
"x-api-key": anthropic_key,
|
||||
"anthropic-version": "2023-06-01"
|
||||
},
|
||||
timeout=10
|
||||
headers={"x-api-key": anthropic_key, "anthropic-version": "2023-06-01"},
|
||||
timeout=10,
|
||||
)
|
||||
if response.status_code == 200:
|
||||
print(f"\r {color('✓', Colors.GREEN)} Anthropic API ")
|
||||
elif response.status_code == 401:
|
||||
print(f"\r {color('✗', Colors.RED)} Anthropic API {color('(invalid API key)', Colors.DIM)} ")
|
||||
print(
|
||||
f"\r {color('✗', Colors.RED)} Anthropic API {color('(invalid API key)', Colors.DIM)} "
|
||||
)
|
||||
else:
|
||||
msg = "(couldn't verify)"
|
||||
print(f"\r {color('⚠', Colors.YELLOW)} Anthropic API {color(msg, Colors.DIM)} ")
|
||||
@@ -491,10 +501,15 @@ def run_doctor(args):
|
||||
|
||||
# -- API-key providers (Z.AI/GLM, Kimi, MiniMax, MiniMax-CN) --
|
||||
_apikey_providers = [
|
||||
("Z.AI / GLM", ("GLM_API_KEY", "ZAI_API_KEY", "Z_AI_API_KEY"), "https://api.z.ai/api/paas/v4/models", "GLM_BASE_URL"),
|
||||
("Kimi / Moonshot", ("KIMI_API_KEY",), "https://api.moonshot.ai/v1/models", "KIMI_BASE_URL"),
|
||||
("MiniMax", ("MINIMAX_API_KEY",), "https://api.minimax.io/v1/models", "MINIMAX_BASE_URL"),
|
||||
("MiniMax (China)", ("MINIMAX_CN_API_KEY",), "https://api.minimaxi.com/v1/models", "MINIMAX_CN_BASE_URL"),
|
||||
(
|
||||
"Z.AI / GLM",
|
||||
("GLM_API_KEY", "ZAI_API_KEY", "Z_AI_API_KEY"),
|
||||
"https://api.z.ai/api/paas/v4/models",
|
||||
"GLM_BASE_URL",
|
||||
),
|
||||
("Kimi / Moonshot", ("KIMI_API_KEY",), "https://api.moonshot.ai/v1/models", "KIMI_BASE_URL"),
|
||||
("MiniMax", ("MINIMAX_API_KEY",), "https://api.minimax.io/v1/models", "MINIMAX_BASE_URL"),
|
||||
("MiniMax (China)", ("MINIMAX_CN_API_KEY",), "https://api.minimaxi.com/v1/models", "MINIMAX_CN_BASE_URL"),
|
||||
]
|
||||
for _pname, _env_vars, _default_url, _base_env in _apikey_providers:
|
||||
_key = ""
|
||||
@@ -507,6 +522,7 @@ def run_doctor(args):
|
||||
print(f" Checking {_pname} API...", end="", flush=True)
|
||||
try:
|
||||
import httpx
|
||||
|
||||
_base = os.getenv(_base_env, "")
|
||||
# Auto-detect Kimi Code keys (sk-kimi-) → api.kimi.com
|
||||
if not _base and _key.startswith("sk-kimi-"):
|
||||
@@ -526,7 +542,9 @@ def run_doctor(args):
|
||||
print(f"\r {color('✗', Colors.RED)} {_label} {color('(invalid API key)', Colors.DIM)} ")
|
||||
issues.append(f"Check {_env_vars[0]} in .env")
|
||||
else:
|
||||
print(f"\r {color('⚠', Colors.YELLOW)} {_label} {color(f'(HTTP {_resp.status_code})', Colors.DIM)} ")
|
||||
print(
|
||||
f"\r {color('⚠', Colors.YELLOW)} {_label} {color(f'(HTTP {_resp.status_code})', Colors.DIM)} "
|
||||
)
|
||||
except Exception as _e:
|
||||
print(f"\r {color('⚠', Colors.YELLOW)} {_label} {color(f'({_e})', Colors.DIM)} ")
|
||||
|
||||
@@ -535,7 +553,7 @@ def run_doctor(args):
|
||||
# =========================================================================
|
||||
print()
|
||||
print(color("◆ Submodules", Colors.CYAN, Colors.BOLD))
|
||||
|
||||
|
||||
# mini-swe-agent (terminal tool backend)
|
||||
mini_swe_dir = PROJECT_ROOT / "mini-swe-agent"
|
||||
if mini_swe_dir.exists() and (mini_swe_dir / "pyproject.toml").exists():
|
||||
@@ -547,7 +565,7 @@ def run_doctor(args):
|
||||
issues.append("Install mini-swe-agent: uv pip install -e ./mini-swe-agent")
|
||||
else:
|
||||
check_warn("mini-swe-agent not found", "(run: git submodule update --init --recursive)")
|
||||
|
||||
|
||||
# tinker-atropos (RL training backend)
|
||||
tinker_dir = PROJECT_ROOT / "tinker-atropos"
|
||||
if tinker_dir.exists() and (tinker_dir / "pyproject.toml").exists():
|
||||
@@ -562,24 +580,24 @@ def run_doctor(args):
|
||||
check_warn("tinker-atropos requires Python 3.11+", f"(current: {py_version.major}.{py_version.minor})")
|
||||
else:
|
||||
check_warn("tinker-atropos not found", "(run: git submodule update --init --recursive)")
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Check: Tool Availability
|
||||
# =========================================================================
|
||||
print()
|
||||
print(color("◆ Tool Availability", Colors.CYAN, Colors.BOLD))
|
||||
|
||||
|
||||
try:
|
||||
# Add project root to path for imports
|
||||
sys.path.insert(0, str(PROJECT_ROOT))
|
||||
from model_tools import check_tool_availability, TOOLSET_REQUIREMENTS
|
||||
|
||||
from model_tools import TOOLSET_REQUIREMENTS, check_tool_availability
|
||||
|
||||
available, unavailable = check_tool_availability()
|
||||
|
||||
|
||||
for tid in available:
|
||||
info = TOOLSET_REQUIREMENTS.get(tid, {})
|
||||
check_ok(info.get("name", tid))
|
||||
|
||||
|
||||
for item in unavailable:
|
||||
env_vars = item.get("missing_vars") or item.get("env_vars") or []
|
||||
if env_vars:
|
||||
@@ -594,7 +612,7 @@ def run_doctor(args):
|
||||
issues.append("Run 'hermes setup' to configure missing API keys for full tool access")
|
||||
except Exception as e:
|
||||
check_warn("Could not check tool availability", f"({e})")
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Check: Skills Hub
|
||||
# =========================================================================
|
||||
@@ -608,6 +626,7 @@ def run_doctor(args):
|
||||
if lock_file.exists():
|
||||
try:
|
||||
import json
|
||||
|
||||
lock_data = json.loads(lock_file.read_text())
|
||||
count = len(lock_data.get("installed", {}))
|
||||
check_ok(f"Lock file OK ({count} hub-installed skill(s))")
|
||||
@@ -621,6 +640,7 @@ def run_doctor(args):
|
||||
check_warn("Skills Hub directory not initialized", "(run: hermes skills list)")
|
||||
|
||||
from hermes_cli.config import get_env_value
|
||||
|
||||
github_token = get_env_value("GITHUB_TOKEN") or get_env_value("GH_TOKEN")
|
||||
if github_token:
|
||||
check_ok("GitHub token configured (authenticated API access)")
|
||||
@@ -656,5 +676,5 @@ def run_doctor(args):
|
||||
else:
|
||||
print(color("─" * 60, Colors.GREEN))
|
||||
print(color(" All checks passed! 🎉", Colors.GREEN, Colors.BOLD))
|
||||
|
||||
|
||||
print()
|
||||
|
||||
@@ -13,18 +13,24 @@ from pathlib import Path
|
||||
|
||||
PROJECT_ROOT = Path(__file__).parent.parent.resolve()
|
||||
|
||||
from hermes_cli.colors import Colors, color
|
||||
from hermes_cli.config import get_env_value, save_env_value
|
||||
from hermes_cli.setup import (
|
||||
print_header, print_info, print_success, print_warning, print_error,
|
||||
prompt, prompt_choice, prompt_yes_no,
|
||||
print_error,
|
||||
print_header,
|
||||
print_info,
|
||||
print_success,
|
||||
print_warning,
|
||||
prompt,
|
||||
prompt_choice,
|
||||
prompt_yes_no,
|
||||
)
|
||||
from hermes_cli.colors import Colors, color
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Process Management (for manual gateway runs)
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def find_gateway_pids() -> list:
|
||||
"""Find PIDs of running gateway processes."""
|
||||
pids = []
|
||||
@@ -38,17 +44,16 @@ def find_gateway_pids() -> list:
|
||||
if is_windows():
|
||||
# Windows: use wmic to search command lines
|
||||
result = subprocess.run(
|
||||
["wmic", "process", "get", "ProcessId,CommandLine", "/FORMAT:LIST"],
|
||||
capture_output=True, text=True
|
||||
["wmic", "process", "get", "ProcessId,CommandLine", "/FORMAT:LIST"], capture_output=True, text=True
|
||||
)
|
||||
# Parse WMIC LIST output: blocks of "CommandLine=...\nProcessId=...\n"
|
||||
current_cmd = ""
|
||||
for line in result.stdout.split('\n'):
|
||||
for line in result.stdout.split("\n"):
|
||||
line = line.strip()
|
||||
if line.startswith("CommandLine="):
|
||||
current_cmd = line[len("CommandLine="):]
|
||||
current_cmd = line[len("CommandLine=") :]
|
||||
elif line.startswith("ProcessId="):
|
||||
pid_str = line[len("ProcessId="):]
|
||||
pid_str = line[len("ProcessId=") :]
|
||||
if any(p in current_cmd for p in patterns):
|
||||
try:
|
||||
pid = int(pid_str)
|
||||
@@ -58,14 +63,10 @@ def find_gateway_pids() -> list:
|
||||
pass
|
||||
current_cmd = ""
|
||||
else:
|
||||
result = subprocess.run(
|
||||
["ps", "aux"],
|
||||
capture_output=True,
|
||||
text=True
|
||||
)
|
||||
for line in result.stdout.split('\n'):
|
||||
result = subprocess.run(["ps", "aux"], capture_output=True, text=True)
|
||||
for line in result.stdout.split("\n"):
|
||||
# Skip grep and current process
|
||||
if 'grep' in line or str(os.getpid()) in line:
|
||||
if "grep" in line or str(os.getpid()) in line:
|
||||
continue
|
||||
for pattern in patterns:
|
||||
if pattern in line:
|
||||
@@ -88,7 +89,7 @@ def kill_gateway_processes(force: bool = False) -> int:
|
||||
"""Kill any running gateway processes. Returns count killed."""
|
||||
pids = find_gateway_pids()
|
||||
killed = 0
|
||||
|
||||
|
||||
for pid in pids:
|
||||
try:
|
||||
if force and not is_windows():
|
||||
@@ -101,18 +102,20 @@ def kill_gateway_processes(force: bool = False) -> int:
|
||||
pass
|
||||
except PermissionError:
|
||||
print(f"⚠ Permission denied to kill PID {pid}")
|
||||
|
||||
|
||||
return killed
|
||||
|
||||
|
||||
def is_linux() -> bool:
|
||||
return sys.platform.startswith('linux')
|
||||
return sys.platform.startswith("linux")
|
||||
|
||||
|
||||
def is_macos() -> bool:
|
||||
return sys.platform == 'darwin'
|
||||
return sys.platform == "darwin"
|
||||
|
||||
|
||||
def is_windows() -> bool:
|
||||
return sys.platform == 'win32'
|
||||
return sys.platform == "win32"
|
||||
|
||||
|
||||
# =============================================================================
|
||||
@@ -122,12 +125,15 @@ def is_windows() -> bool:
|
||||
SERVICE_NAME = "hermes-gateway"
|
||||
SERVICE_DESCRIPTION = "Hermes Agent Gateway - Messaging Platform Integration"
|
||||
|
||||
|
||||
def get_systemd_unit_path() -> Path:
|
||||
return Path.home() / ".config" / "systemd" / "user" / f"{SERVICE_NAME}.service"
|
||||
|
||||
|
||||
def get_launchd_plist_path() -> Path:
|
||||
return Path.home() / "Library" / "LaunchAgents" / "ai.hermes.gateway.plist"
|
||||
|
||||
|
||||
def get_python_path() -> str:
|
||||
if is_windows():
|
||||
venv_python = PROJECT_ROOT / "venv" / "Scripts" / "python.exe"
|
||||
@@ -137,14 +143,16 @@ def get_python_path() -> str:
|
||||
return str(venv_python)
|
||||
return sys.executable
|
||||
|
||||
|
||||
def get_hermes_cli_path() -> str:
|
||||
"""Get the path to the hermes CLI."""
|
||||
# Check if installed via pip
|
||||
import shutil
|
||||
|
||||
hermes_bin = shutil.which("hermes")
|
||||
if hermes_bin:
|
||||
return hermes_bin
|
||||
|
||||
|
||||
# Fallback to direct module execution
|
||||
return f"{get_python_path()} -m hermes_cli.main"
|
||||
|
||||
@@ -153,8 +161,10 @@ def get_hermes_cli_path() -> str:
|
||||
# Systemd (Linux)
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def generate_systemd_unit() -> str:
|
||||
import shutil
|
||||
|
||||
python_path = get_python_path()
|
||||
working_dir = str(PROJECT_ROOT)
|
||||
venv_dir = str(PROJECT_ROOT / "venv")
|
||||
@@ -163,7 +173,7 @@ def generate_systemd_unit() -> str:
|
||||
|
||||
# Build a PATH that includes the venv, node_modules, and standard system dirs
|
||||
sane_path = f"{venv_bin}:{node_bin}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
|
||||
|
||||
hermes_cli = shutil.which("hermes") or f"{python_path} -m hermes_cli.main"
|
||||
return f"""[Unit]
|
||||
Description={SERVICE_DESCRIPTION}
|
||||
@@ -188,56 +198,62 @@ StandardError=journal
|
||||
WantedBy=default.target
|
||||
"""
|
||||
|
||||
|
||||
def systemd_install(force: bool = False):
|
||||
unit_path = get_systemd_unit_path()
|
||||
|
||||
|
||||
if unit_path.exists() and not force:
|
||||
print(f"Service already installed at: {unit_path}")
|
||||
print("Use --force to reinstall")
|
||||
return
|
||||
|
||||
|
||||
unit_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
print(f"Installing systemd service to: {unit_path}")
|
||||
unit_path.write_text(generate_systemd_unit())
|
||||
|
||||
|
||||
subprocess.run(["systemctl", "--user", "daemon-reload"], check=True)
|
||||
subprocess.run(["systemctl", "--user", "enable", SERVICE_NAME], check=True)
|
||||
|
||||
|
||||
print()
|
||||
print("✓ Service installed and enabled!")
|
||||
print()
|
||||
print("Next steps:")
|
||||
print(f" hermes gateway start # Start the service")
|
||||
print(f" hermes gateway status # Check status")
|
||||
print(" hermes gateway start # Start the service")
|
||||
print(" hermes gateway status # Check status")
|
||||
print(f" journalctl --user -u {SERVICE_NAME} -f # View logs")
|
||||
print()
|
||||
print("To enable lingering (keeps running after logout):")
|
||||
print(" sudo loginctl enable-linger $USER")
|
||||
|
||||
|
||||
def systemd_uninstall():
|
||||
subprocess.run(["systemctl", "--user", "stop", SERVICE_NAME], check=False)
|
||||
subprocess.run(["systemctl", "--user", "disable", SERVICE_NAME], check=False)
|
||||
|
||||
|
||||
unit_path = get_systemd_unit_path()
|
||||
if unit_path.exists():
|
||||
unit_path.unlink()
|
||||
print(f"✓ Removed {unit_path}")
|
||||
|
||||
|
||||
subprocess.run(["systemctl", "--user", "daemon-reload"], check=True)
|
||||
print("✓ Service uninstalled")
|
||||
|
||||
|
||||
def systemd_start():
|
||||
subprocess.run(["systemctl", "--user", "start", SERVICE_NAME], check=True)
|
||||
print("✓ Service started")
|
||||
|
||||
|
||||
def systemd_stop():
|
||||
subprocess.run(["systemctl", "--user", "stop", SERVICE_NAME], check=True)
|
||||
print("✓ Service stopped")
|
||||
|
||||
|
||||
def systemd_restart():
|
||||
subprocess.run(["systemctl", "--user", "restart", SERVICE_NAME], check=True)
|
||||
print("✓ Service restarted")
|
||||
|
||||
|
||||
def systemd_status(deep: bool = False):
|
||||
# Check if service unit file exists
|
||||
unit_path = get_systemd_unit_path()
|
||||
@@ -245,54 +261,45 @@ def systemd_status(deep: bool = False):
|
||||
print("✗ Gateway service is not installed")
|
||||
print(" Run: hermes gateway install")
|
||||
return
|
||||
|
||||
|
||||
# Show detailed status first
|
||||
subprocess.run(
|
||||
["systemctl", "--user", "status", SERVICE_NAME, "--no-pager"],
|
||||
capture_output=False
|
||||
)
|
||||
|
||||
subprocess.run(["systemctl", "--user", "status", SERVICE_NAME, "--no-pager"], capture_output=False)
|
||||
|
||||
# Check if service is active
|
||||
result = subprocess.run(
|
||||
["systemctl", "--user", "is-active", SERVICE_NAME],
|
||||
capture_output=True,
|
||||
text=True
|
||||
)
|
||||
|
||||
result = subprocess.run(["systemctl", "--user", "is-active", SERVICE_NAME], capture_output=True, text=True)
|
||||
|
||||
status = result.stdout.strip()
|
||||
|
||||
|
||||
if status == "active":
|
||||
print("✓ Gateway service is running")
|
||||
else:
|
||||
print("✗ Gateway service is stopped")
|
||||
print(" Run: hermes gateway start")
|
||||
|
||||
|
||||
if deep:
|
||||
print()
|
||||
print("Recent logs:")
|
||||
subprocess.run([
|
||||
"journalctl", "--user", "-u", SERVICE_NAME,
|
||||
"-n", "20", "--no-pager"
|
||||
])
|
||||
subprocess.run(["journalctl", "--user", "-u", SERVICE_NAME, "-n", "20", "--no-pager"])
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Launchd (macOS)
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def generate_launchd_plist() -> str:
|
||||
python_path = get_python_path()
|
||||
working_dir = str(PROJECT_ROOT)
|
||||
log_dir = Path.home() / ".hermes" / "logs"
|
||||
log_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
return f"""<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>Label</key>
|
||||
<string>ai.hermes.gateway</string>
|
||||
|
||||
|
||||
<key>ProgramArguments</key>
|
||||
<array>
|
||||
<string>{python_path}</string>
|
||||
@@ -301,42 +308,43 @@ def generate_launchd_plist() -> str:
|
||||
<string>gateway</string>
|
||||
<string>run</string>
|
||||
</array>
|
||||
|
||||
|
||||
<key>WorkingDirectory</key>
|
||||
<string>{working_dir}</string>
|
||||
|
||||
|
||||
<key>RunAtLoad</key>
|
||||
<true/>
|
||||
|
||||
|
||||
<key>KeepAlive</key>
|
||||
<dict>
|
||||
<key>SuccessfulExit</key>
|
||||
<false/>
|
||||
</dict>
|
||||
|
||||
|
||||
<key>StandardOutPath</key>
|
||||
<string>{log_dir}/gateway.log</string>
|
||||
|
||||
|
||||
<key>StandardErrorPath</key>
|
||||
<string>{log_dir}/gateway.error.log</string>
|
||||
</dict>
|
||||
</plist>
|
||||
"""
|
||||
|
||||
|
||||
def launchd_install(force: bool = False):
|
||||
plist_path = get_launchd_plist_path()
|
||||
|
||||
|
||||
if plist_path.exists() and not force:
|
||||
print(f"Service already installed at: {plist_path}")
|
||||
print("Use --force to reinstall")
|
||||
return
|
||||
|
||||
|
||||
plist_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
print(f"Installing launchd service to: {plist_path}")
|
||||
plist_path.write_text(generate_launchd_plist())
|
||||
|
||||
|
||||
subprocess.run(["launchctl", "load", str(plist_path)], check=True)
|
||||
|
||||
|
||||
print()
|
||||
print("✓ Service installed and loaded!")
|
||||
print()
|
||||
@@ -344,41 +352,42 @@ def launchd_install(force: bool = False):
|
||||
print(" hermes gateway status # Check status")
|
||||
print(" tail -f ~/.hermes/logs/gateway.log # View logs")
|
||||
|
||||
|
||||
def launchd_uninstall():
|
||||
plist_path = get_launchd_plist_path()
|
||||
subprocess.run(["launchctl", "unload", str(plist_path)], check=False)
|
||||
|
||||
|
||||
if plist_path.exists():
|
||||
plist_path.unlink()
|
||||
print(f"✓ Removed {plist_path}")
|
||||
|
||||
|
||||
print("✓ Service uninstalled")
|
||||
|
||||
|
||||
def launchd_start():
|
||||
subprocess.run(["launchctl", "start", "ai.hermes.gateway"], check=True)
|
||||
print("✓ Service started")
|
||||
|
||||
|
||||
def launchd_stop():
|
||||
subprocess.run(["launchctl", "stop", "ai.hermes.gateway"], check=True)
|
||||
print("✓ Service stopped")
|
||||
|
||||
|
||||
def launchd_restart():
|
||||
launchd_stop()
|
||||
launchd_start()
|
||||
|
||||
|
||||
def launchd_status(deep: bool = False):
|
||||
result = subprocess.run(
|
||||
["launchctl", "list", "ai.hermes.gateway"],
|
||||
capture_output=True,
|
||||
text=True
|
||||
)
|
||||
|
||||
result = subprocess.run(["launchctl", "list", "ai.hermes.gateway"], capture_output=True, text=True)
|
||||
|
||||
if result.returncode == 0:
|
||||
print("✓ Gateway service is loaded")
|
||||
print(result.stdout)
|
||||
else:
|
||||
print("✗ Gateway service is not loaded")
|
||||
|
||||
|
||||
if deep:
|
||||
log_file = Path.home() / ".hermes" / "logs" / "gateway.log"
|
||||
if log_file.exists():
|
||||
@@ -391,9 +400,10 @@ def launchd_status(deep: bool = False):
|
||||
# Gateway Runner
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def run_gateway(verbose: bool = False, replace: bool = False):
|
||||
"""Run the gateway in foreground.
|
||||
|
||||
|
||||
Args:
|
||||
verbose: Enable verbose logging output.
|
||||
replace: If True, kill any existing gateway instance before starting.
|
||||
@@ -401,9 +411,9 @@ def run_gateway(verbose: bool = False, replace: bool = False):
|
||||
hasn't fully exited yet.
|
||||
"""
|
||||
sys.path.insert(0, str(PROJECT_ROOT))
|
||||
|
||||
|
||||
from gateway.run import start_gateway
|
||||
|
||||
|
||||
print("┌─────────────────────────────────────────────────────────┐")
|
||||
print("│ ⚕ Hermes Gateway Starting... │")
|
||||
print("├─────────────────────────────────────────────────────────┤")
|
||||
@@ -411,7 +421,7 @@ def run_gateway(verbose: bool = False, replace: bool = False):
|
||||
print("│ Press Ctrl+C to stop │")
|
||||
print("└─────────────────────────────────────────────────────────┘")
|
||||
print()
|
||||
|
||||
|
||||
# Exit with code 1 if gateway fails to connect any platform,
|
||||
# so systemd Restart=on-failure will retry on transient errors
|
||||
success = asyncio.run(start_gateway(replace=replace))
|
||||
@@ -438,13 +448,25 @@ _PLATFORMS = [
|
||||
"4. To find your user ID: message @userinfobot — it replies with your numeric ID",
|
||||
],
|
||||
"vars": [
|
||||
{"name": "TELEGRAM_BOT_TOKEN", "prompt": "Bot token", "password": True,
|
||||
"help": "Paste the token from @BotFather (step 3 above)."},
|
||||
{"name": "TELEGRAM_ALLOWED_USERS", "prompt": "Allowed user IDs (comma-separated)", "password": False,
|
||||
"is_allowlist": True,
|
||||
"help": "Paste your user ID from step 4 above."},
|
||||
{"name": "TELEGRAM_HOME_CHANNEL", "prompt": "Home channel ID (for cron/notification delivery, or empty to set later with /set-home)", "password": False,
|
||||
"help": "For DMs, this is your user ID. You can set it later by typing /set-home in chat."},
|
||||
{
|
||||
"name": "TELEGRAM_BOT_TOKEN",
|
||||
"prompt": "Bot token",
|
||||
"password": True,
|
||||
"help": "Paste the token from @BotFather (step 3 above).",
|
||||
},
|
||||
{
|
||||
"name": "TELEGRAM_ALLOWED_USERS",
|
||||
"prompt": "Allowed user IDs (comma-separated)",
|
||||
"password": False,
|
||||
"is_allowlist": True,
|
||||
"help": "Paste your user ID from step 4 above.",
|
||||
},
|
||||
{
|
||||
"name": "TELEGRAM_HOME_CHANNEL",
|
||||
"prompt": "Home channel ID (for cron/notification delivery, or empty to set later with /set-home)",
|
||||
"password": False,
|
||||
"help": "For DMs, this is your user ID. You can set it later by typing /set-home in chat.",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
@@ -466,13 +488,25 @@ _PLATFORMS = [
|
||||
" then right-click your name → Copy ID",
|
||||
],
|
||||
"vars": [
|
||||
{"name": "DISCORD_BOT_TOKEN", "prompt": "Bot token", "password": True,
|
||||
"help": "Paste the token from step 2 above."},
|
||||
{"name": "DISCORD_ALLOWED_USERS", "prompt": "Allowed user IDs or usernames (comma-separated)", "password": False,
|
||||
"is_allowlist": True,
|
||||
"help": "Paste your user ID from step 5 above."},
|
||||
{"name": "DISCORD_HOME_CHANNEL", "prompt": "Home channel ID (for cron/notification delivery, or empty to set later with /set-home)", "password": False,
|
||||
"help": "Right-click a channel → Copy Channel ID (requires Developer Mode)."},
|
||||
{
|
||||
"name": "DISCORD_BOT_TOKEN",
|
||||
"prompt": "Bot token",
|
||||
"password": True,
|
||||
"help": "Paste the token from step 2 above.",
|
||||
},
|
||||
{
|
||||
"name": "DISCORD_ALLOWED_USERS",
|
||||
"prompt": "Allowed user IDs or usernames (comma-separated)",
|
||||
"password": False,
|
||||
"is_allowlist": True,
|
||||
"help": "Paste your user ID from step 5 above.",
|
||||
},
|
||||
{
|
||||
"name": "DISCORD_HOME_CHANNEL",
|
||||
"prompt": "Home channel ID (for cron/notification delivery, or empty to set later with /set-home)",
|
||||
"password": False,
|
||||
"help": "Right-click a channel → Copy Channel ID (requires Developer Mode).",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
@@ -482,23 +516,40 @@ _PLATFORMS = [
|
||||
"token_var": "SLACK_BOT_TOKEN",
|
||||
"setup_instructions": [
|
||||
"1. Go to https://api.slack.com/apps → Create New App → From Scratch",
|
||||
"2. Enable Socket Mode: App Settings → Socket Mode → Enable",
|
||||
"3. Get Bot Token: OAuth & Permissions → Install to Workspace → copy xoxb-... token",
|
||||
"4. Get App Token: Basic Information → App-Level Tokens → Generate",
|
||||
" Name it anything, add scope: connections:write → copy xapp-... token",
|
||||
"5. Add bot scopes: OAuth & Permissions → Scopes → chat:write, im:history,",
|
||||
" im:read, im:write, channels:history, channels:read",
|
||||
"6. Reinstall the app to your workspace after adding scopes",
|
||||
"2. Enable Socket Mode: Settings → Socket Mode → Enable",
|
||||
" Create an App-Level Token with scope: connections:write → copy xapp-... token",
|
||||
"3. Add Bot Token Scopes: Features → OAuth & Permissions → Scopes",
|
||||
" Required: chat:write, app_mentions:read, channels:history, channels:read,",
|
||||
" groups:history, im:history, im:read, im:write, users:read, files:write",
|
||||
"4. Subscribe to Events: Features → Event Subscriptions → Enable",
|
||||
" Required events: message.im, message.channels, app_mention",
|
||||
" Optional: message.groups (for private channels)",
|
||||
" ⚠ Without message.channels the bot will ONLY work in DMs!",
|
||||
"5. Install to Workspace: Settings → Install App → copy xoxb-... token",
|
||||
"6. Reinstall the app after any scope or event changes",
|
||||
"7. Find your user ID: click your profile → three dots → Copy member ID",
|
||||
"8. Invite the bot to channels: /invite @YourBot",
|
||||
],
|
||||
"vars": [
|
||||
{"name": "SLACK_BOT_TOKEN", "prompt": "Bot Token (xoxb-...)", "password": True,
|
||||
"help": "Paste the bot token from step 3 above."},
|
||||
{"name": "SLACK_APP_TOKEN", "prompt": "App Token (xapp-...)", "password": True,
|
||||
"help": "Paste the app-level token from step 4 above."},
|
||||
{"name": "SLACK_ALLOWED_USERS", "prompt": "Allowed user IDs (comma-separated)", "password": False,
|
||||
"is_allowlist": True,
|
||||
"help": "Paste your member ID from step 7 above."},
|
||||
{
|
||||
"name": "SLACK_BOT_TOKEN",
|
||||
"prompt": "Bot Token (xoxb-...)",
|
||||
"password": True,
|
||||
"help": "Paste the bot token from step 3 above.",
|
||||
},
|
||||
{
|
||||
"name": "SLACK_APP_TOKEN",
|
||||
"prompt": "App Token (xapp-...)",
|
||||
"password": True,
|
||||
"help": "Paste the app-level token from step 4 above.",
|
||||
},
|
||||
{
|
||||
"name": "SLACK_ALLOWED_USERS",
|
||||
"prompt": "Allowed user IDs (comma-separated)",
|
||||
"password": False,
|
||||
"is_allowlist": True,
|
||||
"help": "Paste your member ID from step 7 above.",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
@@ -577,14 +628,14 @@ def _setup_standard_platform(platform: dict):
|
||||
|
||||
# Allowlist fields get special handling for the deny-by-default security model
|
||||
if var.get("is_allowlist"):
|
||||
print_info(f" The gateway DENIES all users by default for security.")
|
||||
print_info(f" Enter user IDs to create an allowlist, or leave empty")
|
||||
print_info(f" and you'll be asked about open access next.")
|
||||
print_info(" The gateway DENIES all users by default for security.")
|
||||
print_info(" Enter user IDs to create an allowlist, or leave empty")
|
||||
print_info(" and you'll be asked about open access next.")
|
||||
value = prompt(f" {var['prompt']}", password=False)
|
||||
if value:
|
||||
cleaned = value.replace(" ", "")
|
||||
save_env_value(var["name"], cleaned)
|
||||
print_success(f" Saved — only these users can interact with the bot.")
|
||||
print_success(" Saved — only these users can interact with the bot.")
|
||||
allowed_val_set = cleaned
|
||||
else:
|
||||
# No allowlist — ask about open access vs DM pairing
|
||||
@@ -613,7 +664,7 @@ def _setup_standard_platform(platform: dict):
|
||||
print_warning(f" Skipped — {label} won't work without this.")
|
||||
return
|
||||
else:
|
||||
print_info(f" Skipped (can configure later)")
|
||||
print_info(" Skipped (can configure later)")
|
||||
|
||||
# If an allowlist was set and home channel wasn't, offer to reuse
|
||||
# the first user ID (common for Telegram DMs).
|
||||
@@ -631,8 +682,10 @@ def _setup_standard_platform(platform: dict):
|
||||
|
||||
def _setup_whatsapp():
|
||||
"""Delegate to the existing WhatsApp setup flow."""
|
||||
from hermes_cli.main import cmd_whatsapp
|
||||
import argparse
|
||||
|
||||
from hermes_cli.main import cmd_whatsapp
|
||||
|
||||
cmd_whatsapp(argparse.Namespace())
|
||||
|
||||
|
||||
@@ -648,16 +701,10 @@ def _is_service_installed() -> bool:
|
||||
def _is_service_running() -> bool:
|
||||
"""Check if the gateway service is currently running."""
|
||||
if is_linux() and get_systemd_unit_path().exists():
|
||||
result = subprocess.run(
|
||||
["systemctl", "--user", "is-active", SERVICE_NAME],
|
||||
capture_output=True, text=True
|
||||
)
|
||||
result = subprocess.run(["systemctl", "--user", "is-active", SERVICE_NAME], capture_output=True, text=True)
|
||||
return result.stdout.strip() == "active"
|
||||
elif is_macos() and get_launchd_plist_path().exists():
|
||||
result = subprocess.run(
|
||||
["launchctl", "list", "ai.hermes.gateway"],
|
||||
capture_output=True, text=True
|
||||
)
|
||||
result = subprocess.run(["launchctl", "list", "ai.hermes.gateway"], capture_output=True, text=True)
|
||||
return result.returncode == 0
|
||||
# Check for manual processes
|
||||
return len(find_gateway_pids()) > 0
|
||||
@@ -692,7 +739,7 @@ def _setup_signal():
|
||||
print_info(" Docker: bbernhard/signal-cli-rest-api")
|
||||
print()
|
||||
print_info(" After installing, link your account and start the daemon:")
|
||||
print_info(" signal-cli link -n \"HermesAgent\"")
|
||||
print_info(' signal-cli link -n "HermesAgent"')
|
||||
print_info(" signal-cli --account +YOURNUMBER daemon --http 127.0.0.1:8080")
|
||||
print()
|
||||
|
||||
@@ -710,6 +757,7 @@ def _setup_signal():
|
||||
print_info(" Testing connection...")
|
||||
try:
|
||||
import httpx
|
||||
|
||||
resp = httpx.get(f"{url.rstrip('/')}/api/v1/check", timeout=10.0)
|
||||
if resp.status_code == 200:
|
||||
print_success(" signal-cli daemon is reachable!")
|
||||
@@ -774,7 +822,7 @@ def _setup_signal():
|
||||
print_success("Signal configured!")
|
||||
print_info(f" URL: {url}")
|
||||
print_info(f" Account: {account}")
|
||||
print_info(f" DM auth: via SIGNAL_ALLOWED_USERS + DM pairing")
|
||||
print_info(" DM auth: via SIGNAL_ALLOWED_USERS + DM pairing")
|
||||
print_info(f" Groups: {'enabled' if get_env_value('SIGNAL_GROUP_ALLOWED_USERS') else 'disabled'}")
|
||||
|
||||
|
||||
@@ -836,11 +884,10 @@ def gateway_setup():
|
||||
_setup_standard_platform(platform)
|
||||
|
||||
# ── Post-setup: offer to install/restart gateway ──
|
||||
any_configured = any(
|
||||
bool(get_env_value(p["token_var"]))
|
||||
for p in _PLATFORMS
|
||||
if p["key"] != "whatsapp"
|
||||
) or (get_env_value("WHATSAPP_ENABLED") or "").lower() == "true"
|
||||
any_configured = (
|
||||
any(bool(get_env_value(p["token_var"])) for p in _PLATFORMS if p["key"] != "whatsapp")
|
||||
or (get_env_value("WHATSAPP_ENABLED") or "").lower() == "true"
|
||||
)
|
||||
|
||||
if any_configured:
|
||||
print()
|
||||
@@ -873,7 +920,9 @@ def gateway_setup():
|
||||
print()
|
||||
if is_linux() or is_macos():
|
||||
platform_name = "systemd" if is_linux() else "launchd"
|
||||
if prompt_yes_no(f" Install the gateway as a {platform_name} service? (runs in background, starts on boot)", True):
|
||||
if prompt_yes_no(
|
||||
f" Install the gateway as a {platform_name} service? (runs in background, starts on boot)", True
|
||||
):
|
||||
try:
|
||||
force = False
|
||||
if is_linux():
|
||||
@@ -909,14 +958,15 @@ def gateway_setup():
|
||||
# Main Command Handler
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def gateway_command(args):
|
||||
"""Handle gateway subcommands."""
|
||||
subcmd = getattr(args, 'gateway_command', None)
|
||||
|
||||
subcmd = getattr(args, "gateway_command", None)
|
||||
|
||||
# Default to run if no subcommand
|
||||
if subcmd is None or subcmd == "run":
|
||||
verbose = getattr(args, 'verbose', False)
|
||||
replace = getattr(args, 'replace', False)
|
||||
verbose = getattr(args, "verbose", False)
|
||||
replace = getattr(args, "replace", False)
|
||||
run_gateway(verbose, replace=replace)
|
||||
return
|
||||
|
||||
@@ -926,7 +976,7 @@ def gateway_command(args):
|
||||
|
||||
# Service management commands
|
||||
if subcmd == "install":
|
||||
force = getattr(args, 'force', False)
|
||||
force = getattr(args, "force", False)
|
||||
if is_linux():
|
||||
systemd_install(force)
|
||||
elif is_macos():
|
||||
@@ -935,7 +985,7 @@ def gateway_command(args):
|
||||
print("Service installation not supported on this platform.")
|
||||
print("Run manually: hermes gateway run")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
elif subcmd == "uninstall":
|
||||
if is_linux():
|
||||
systemd_uninstall()
|
||||
@@ -944,7 +994,7 @@ def gateway_command(args):
|
||||
else:
|
||||
print("Not supported on this platform.")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
elif subcmd == "start":
|
||||
if is_linux():
|
||||
systemd_start()
|
||||
@@ -953,11 +1003,11 @@ def gateway_command(args):
|
||||
else:
|
||||
print("Not supported on this platform.")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
elif subcmd == "stop":
|
||||
# Try service first, fall back to killing processes directly
|
||||
service_available = False
|
||||
|
||||
|
||||
if is_linux() and get_systemd_unit_path().exists():
|
||||
try:
|
||||
systemd_stop()
|
||||
@@ -970,7 +1020,7 @@ def gateway_command(args):
|
||||
service_available = True
|
||||
except subprocess.CalledProcessError:
|
||||
pass
|
||||
|
||||
|
||||
if not service_available:
|
||||
# Kill gateway processes directly
|
||||
killed = kill_gateway_processes()
|
||||
@@ -978,11 +1028,11 @@ def gateway_command(args):
|
||||
print(f"✓ Stopped {killed} gateway process(es)")
|
||||
else:
|
||||
print("✗ No gateway processes found")
|
||||
|
||||
|
||||
elif subcmd == "restart":
|
||||
# Try service first, fall back to killing and restarting
|
||||
service_available = False
|
||||
|
||||
|
||||
if is_linux() and get_systemd_unit_path().exists():
|
||||
try:
|
||||
systemd_restart()
|
||||
@@ -995,23 +1045,24 @@ def gateway_command(args):
|
||||
service_available = True
|
||||
except subprocess.CalledProcessError:
|
||||
pass
|
||||
|
||||
|
||||
if not service_available:
|
||||
# Manual restart: kill existing processes
|
||||
killed = kill_gateway_processes()
|
||||
if killed:
|
||||
print(f"✓ Stopped {killed} gateway process(es)")
|
||||
|
||||
|
||||
import time
|
||||
|
||||
time.sleep(2)
|
||||
|
||||
|
||||
# Start fresh
|
||||
print("Starting gateway...")
|
||||
run_gateway(verbose=False)
|
||||
|
||||
|
||||
elif subcmd == "status":
|
||||
deep = getattr(args, 'deep', False)
|
||||
|
||||
deep = getattr(args, "deep", False)
|
||||
|
||||
# Check for service first
|
||||
if is_linux() and get_systemd_unit_path().exists():
|
||||
systemd_status(deep)
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -8,26 +8,26 @@ Add, remove, or reorder entries here — both `hermes setup` and
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import urllib.request
|
||||
import urllib.error
|
||||
import urllib.request
|
||||
from difflib import get_close_matches
|
||||
from typing import Any, Optional
|
||||
from typing import Any
|
||||
|
||||
# (model_id, display description shown in menus)
|
||||
OPENROUTER_MODELS: list[tuple[str, str]] = [
|
||||
("anthropic/claude-opus-4.6", "recommended"),
|
||||
("anthropic/claude-sonnet-4.5", ""),
|
||||
("openai/gpt-5.4-pro", ""),
|
||||
("openai/gpt-5.4", ""),
|
||||
("openai/gpt-5.3-codex", ""),
|
||||
("google/gemini-3-pro-preview", ""),
|
||||
("google/gemini-3-flash-preview", ""),
|
||||
("qwen/qwen3.5-plus-02-15", ""),
|
||||
("qwen/qwen3.5-35b-a3b", ""),
|
||||
("stepfun/step-3.5-flash", ""),
|
||||
("z-ai/glm-5", ""),
|
||||
("moonshotai/kimi-k2.5", ""),
|
||||
("minimax/minimax-m2.5", ""),
|
||||
("anthropic/claude-opus-4.6", "recommended"),
|
||||
("anthropic/claude-sonnet-4.5", ""),
|
||||
("openai/gpt-5.4-pro", ""),
|
||||
("openai/gpt-5.4", ""),
|
||||
("openai/gpt-5.3-codex", ""),
|
||||
("google/gemini-3-pro-preview", ""),
|
||||
("google/gemini-3-flash-preview", ""),
|
||||
("qwen/qwen3.5-plus-02-15", ""),
|
||||
("qwen/qwen3.5-35b-a3b", ""),
|
||||
("stepfun/step-3.5-flash", ""),
|
||||
("z-ai/glm-5", ""),
|
||||
("moonshotai/kimi-k2.5", ""),
|
||||
("minimax/minimax-m2.5", ""),
|
||||
]
|
||||
|
||||
_PROVIDER_MODELS: dict[str, list[str]] = {
|
||||
@@ -63,7 +63,7 @@ _PROVIDER_LABELS = {
|
||||
"kimi-coding": "Kimi / Moonshot",
|
||||
"minimax": "MiniMax",
|
||||
"minimax-cn": "MiniMax (China)",
|
||||
"custom": "custom endpoint",
|
||||
"custom": "Custom endpoint",
|
||||
}
|
||||
|
||||
_PROVIDER_ALIASES = {
|
||||
@@ -93,9 +93,7 @@ def menu_labels() -> list[str]:
|
||||
|
||||
# All provider IDs and aliases that are valid for the provider:model syntax.
|
||||
_KNOWN_PROVIDER_NAMES: set[str] = (
|
||||
set(_PROVIDER_LABELS.keys())
|
||||
| set(_PROVIDER_ALIASES.keys())
|
||||
| {"openrouter", "custom"}
|
||||
set(_PROVIDER_LABELS.keys()) | set(_PROVIDER_ALIASES.keys()) | {"openrouter", "custom"}
|
||||
)
|
||||
|
||||
|
||||
@@ -107,8 +105,13 @@ def list_available_providers() -> list[dict[str, str]]:
|
||||
"""
|
||||
# Canonical providers in display order
|
||||
_PROVIDER_ORDER = [
|
||||
"openrouter", "nous", "openai-codex",
|
||||
"zai", "kimi-coding", "minimax", "minimax-cn",
|
||||
"openrouter",
|
||||
"nous",
|
||||
"openai-codex",
|
||||
"zai",
|
||||
"kimi-coding",
|
||||
"minimax",
|
||||
"minimax-cn",
|
||||
]
|
||||
# Build reverse alias map
|
||||
aliases_for: dict[str, list[str]] = {}
|
||||
@@ -123,16 +126,19 @@ def list_available_providers() -> list[dict[str, str]]:
|
||||
has_creds = False
|
||||
try:
|
||||
from hermes_cli.runtime_provider import resolve_runtime_provider
|
||||
|
||||
runtime = resolve_runtime_provider(requested=pid)
|
||||
has_creds = bool(runtime.get("api_key"))
|
||||
except Exception:
|
||||
pass
|
||||
result.append({
|
||||
"id": pid,
|
||||
"label": label,
|
||||
"aliases": alias_list,
|
||||
"authenticated": has_creds,
|
||||
})
|
||||
result.append(
|
||||
{
|
||||
"id": pid,
|
||||
"label": label,
|
||||
"aliases": alias_list,
|
||||
"authenticated": has_creds,
|
||||
}
|
||||
)
|
||||
return result
|
||||
|
||||
|
||||
@@ -157,13 +163,13 @@ def parse_model_input(raw: str, current_provider: str) -> tuple[str, str]:
|
||||
colon = stripped.find(":")
|
||||
if colon > 0:
|
||||
provider_part = stripped[:colon].strip().lower()
|
||||
model_part = stripped[colon + 1:].strip()
|
||||
model_part = stripped[colon + 1 :].strip()
|
||||
if provider_part and model_part and provider_part in _KNOWN_PROVIDER_NAMES:
|
||||
return (normalize_provider(provider_part), model_part)
|
||||
return (current_provider, stripped)
|
||||
|
||||
|
||||
def curated_models_for_provider(provider: Optional[str]) -> list[tuple[str, str]]:
|
||||
def curated_models_for_provider(provider: str | None) -> list[tuple[str, str]]:
|
||||
"""Return ``(model_id, description)`` tuples for a provider's curated list."""
|
||||
normalized = normalize_provider(provider)
|
||||
if normalized == "openrouter":
|
||||
@@ -172,7 +178,7 @@ def curated_models_for_provider(provider: Optional[str]) -> list[tuple[str, str]
|
||||
return [(m, "") for m in models]
|
||||
|
||||
|
||||
def normalize_provider(provider: Optional[str]) -> str:
|
||||
def normalize_provider(provider: str | None) -> str:
|
||||
"""Normalize provider aliases to Hermes' canonical provider ids.
|
||||
|
||||
Note: ``"auto"`` passes through unchanged — use
|
||||
@@ -183,7 +189,7 @@ def normalize_provider(provider: Optional[str]) -> str:
|
||||
return _PROVIDER_ALIASES.get(normalized, normalized)
|
||||
|
||||
|
||||
def provider_model_ids(provider: Optional[str]) -> list[str]:
|
||||
def provider_model_ids(provider: str | None) -> list[str]:
|
||||
"""Return the best known model catalog for a provider."""
|
||||
normalized = normalize_provider(provider)
|
||||
if normalized == "openrouter":
|
||||
@@ -196,10 +202,10 @@ def provider_model_ids(provider: Optional[str]) -> list[str]:
|
||||
|
||||
|
||||
def fetch_api_models(
|
||||
api_key: Optional[str],
|
||||
base_url: Optional[str],
|
||||
api_key: str | None,
|
||||
base_url: str | None,
|
||||
timeout: float = 5.0,
|
||||
) -> Optional[list[str]]:
|
||||
) -> list[str] | None:
|
||||
"""Fetch the list of available model IDs from the provider's ``/models`` endpoint.
|
||||
|
||||
Returns a list of model ID strings, or ``None`` if the endpoint could not
|
||||
@@ -225,10 +231,10 @@ def fetch_api_models(
|
||||
|
||||
def validate_requested_model(
|
||||
model_name: str,
|
||||
provider: Optional[str],
|
||||
provider: str | None,
|
||||
*,
|
||||
api_key: Optional[str] = None,
|
||||
base_url: Optional[str] = None,
|
||||
api_key: str | None = None,
|
||||
base_url: str | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Validate a ``/model`` value for the active provider.
|
||||
@@ -286,10 +292,7 @@ def validate_requested_model(
|
||||
"accepted": False,
|
||||
"persist": False,
|
||||
"recognized": False,
|
||||
"message": (
|
||||
f"Error: `{requested}` is not a valid model for this provider."
|
||||
f"{suggestion_text}"
|
||||
),
|
||||
"message": (f"Error: `{requested}` is not a valid model for this provider.{suggestion_text}"),
|
||||
}
|
||||
|
||||
# api_models is None — couldn't reach API, fall back to catalog check
|
||||
|
||||
@@ -8,6 +8,7 @@ Usage:
|
||||
hermes pairing clear-pending # Clear all expired/pending codes
|
||||
"""
|
||||
|
||||
|
||||
def pairing_command(args):
|
||||
"""Handle hermes pairing subcommands."""
|
||||
from gateway.pairing import PairingStore
|
||||
@@ -72,10 +73,10 @@ def _cmd_approve(store, platform: str, code: str):
|
||||
name = result.get("user_name", "")
|
||||
display = f"{name} ({uid})" if name else uid
|
||||
print(f"\n Approved! User {display} on {platform} can now use the bot~")
|
||||
print(f" They'll be recognized automatically on their next message.\n")
|
||||
print(" They'll be recognized automatically on their next message.\n")
|
||||
else:
|
||||
print(f"\n Code '{code}' not found or expired for platform '{platform}'.")
|
||||
print(f" Run 'hermes pairing list' to see pending codes.\n")
|
||||
print(" Run 'hermes pairing list' to see pending codes.\n")
|
||||
|
||||
|
||||
def _cmd_revoke(store, platform: str, user_id: str):
|
||||
|
||||
@@ -3,22 +3,22 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import Any, Dict, Optional
|
||||
from typing import Any
|
||||
|
||||
from hermes_cli.auth import (
|
||||
AuthError,
|
||||
PROVIDER_REGISTRY,
|
||||
AuthError,
|
||||
format_auth_error,
|
||||
resolve_provider,
|
||||
resolve_nous_runtime_credentials,
|
||||
resolve_codex_runtime_credentials,
|
||||
resolve_api_key_provider_credentials,
|
||||
resolve_codex_runtime_credentials,
|
||||
resolve_nous_runtime_credentials,
|
||||
resolve_provider,
|
||||
)
|
||||
from hermes_cli.config import load_config
|
||||
from hermes_constants import OPENROUTER_BASE_URL
|
||||
|
||||
|
||||
def _get_model_config() -> Dict[str, Any]:
|
||||
def _get_model_config() -> dict[str, Any]:
|
||||
config = load_config()
|
||||
model_cfg = config.get("model")
|
||||
if isinstance(model_cfg, dict):
|
||||
@@ -28,7 +28,7 @@ def _get_model_config() -> Dict[str, Any]:
|
||||
return {}
|
||||
|
||||
|
||||
def resolve_requested_provider(requested: Optional[str] = None) -> str:
|
||||
def resolve_requested_provider(requested: str | None = None) -> str:
|
||||
"""Resolve provider request from explicit arg, env, then config."""
|
||||
if requested and requested.strip():
|
||||
return requested.strip().lower()
|
||||
@@ -48,9 +48,9 @@ def resolve_requested_provider(requested: Optional[str] = None) -> str:
|
||||
def _resolve_openrouter_runtime(
|
||||
*,
|
||||
requested_provider: str,
|
||||
explicit_api_key: Optional[str] = None,
|
||||
explicit_base_url: Optional[str] = None,
|
||||
) -> Dict[str, Any]:
|
||||
explicit_api_key: str | None = None,
|
||||
explicit_base_url: str | None = None,
|
||||
) -> dict[str, Any]:
|
||||
model_cfg = _get_model_config()
|
||||
cfg_base_url = model_cfg.get("base_url") if isinstance(model_cfg.get("base_url"), str) else ""
|
||||
cfg_provider = model_cfg.get("provider") if isinstance(model_cfg.get("provider"), str) else ""
|
||||
@@ -81,19 +81,9 @@ def _resolve_openrouter_runtime(
|
||||
# provider (issues #420, #560).
|
||||
_is_openrouter_url = "openrouter.ai" in base_url
|
||||
if _is_openrouter_url:
|
||||
api_key = (
|
||||
explicit_api_key
|
||||
or os.getenv("OPENROUTER_API_KEY")
|
||||
or os.getenv("OPENAI_API_KEY")
|
||||
or ""
|
||||
)
|
||||
api_key = explicit_api_key or os.getenv("OPENROUTER_API_KEY") or os.getenv("OPENAI_API_KEY") or ""
|
||||
else:
|
||||
api_key = (
|
||||
explicit_api_key
|
||||
or os.getenv("OPENAI_API_KEY")
|
||||
or os.getenv("OPENROUTER_API_KEY")
|
||||
or ""
|
||||
)
|
||||
api_key = explicit_api_key or os.getenv("OPENAI_API_KEY") or os.getenv("OPENROUTER_API_KEY") or ""
|
||||
|
||||
source = "explicit" if (explicit_api_key or explicit_base_url) else "env/config"
|
||||
|
||||
@@ -108,10 +98,10 @@ def _resolve_openrouter_runtime(
|
||||
|
||||
def resolve_runtime_provider(
|
||||
*,
|
||||
requested: Optional[str] = None,
|
||||
explicit_api_key: Optional[str] = None,
|
||||
explicit_base_url: Optional[str] = None,
|
||||
) -> Dict[str, Any]:
|
||||
requested: str | None = None,
|
||||
explicit_api_key: str | None = None,
|
||||
explicit_base_url: str | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""Resolve runtime provider credentials for agent execution."""
|
||||
requested_provider = resolve_requested_provider(requested)
|
||||
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -13,7 +13,6 @@ handler are thin wrappers that parse args and delegate.
|
||||
import json
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
@@ -29,6 +28,7 @@ _console = Console()
|
||||
# Shared do_* functions
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _resolve_short_name(name: str, sources, console: Console) -> str:
|
||||
"""
|
||||
Resolve a short skill name (e.g. 'pptx') to a full identifier by searching
|
||||
@@ -57,7 +57,9 @@ def _resolve_short_name(name: str, sources, console: Console) -> str:
|
||||
table.add_column("Trust", style="dim")
|
||||
table.add_column("Identifier", style="bold cyan")
|
||||
for r in exact:
|
||||
trust_style = {"builtin": "bright_cyan", "trusted": "green", "community": "yellow"}.get(r.trust_level, "dim")
|
||||
trust_style = {"builtin": "bright_cyan", "trusted": "green", "community": "yellow"}.get(
|
||||
r.trust_level, "dim"
|
||||
)
|
||||
trust_label = "official" if r.source == "official" else r.trust_level
|
||||
table.add_row(r.source, f"[{trust_style}]{trust_label}[/]", r.identifier)
|
||||
c.print(table)
|
||||
@@ -76,8 +78,7 @@ def _resolve_short_name(name: str, sources, console: Console) -> str:
|
||||
return ""
|
||||
|
||||
|
||||
def do_search(query: str, source: str = "all", limit: int = 10,
|
||||
console: Optional[Console] = None) -> None:
|
||||
def do_search(query: str, source: str = "all", limit: int = 10, console: Console | None = None) -> None:
|
||||
"""Search registries and display results as a Rich table."""
|
||||
from tools.skills_hub import GitHubAuth, create_source_router, unified_search
|
||||
|
||||
@@ -111,18 +112,19 @@ def do_search(query: str, source: str = "all", limit: int = 10,
|
||||
)
|
||||
|
||||
c.print(table)
|
||||
c.print("[dim]Use: hermes skills inspect <identifier> to preview, "
|
||||
"hermes skills install <identifier> to install[/]\n")
|
||||
c.print(
|
||||
"[dim]Use: hermes skills inspect <identifier> to preview, hermes skills install <identifier> to install[/]\n"
|
||||
)
|
||||
|
||||
|
||||
def do_browse(page: int = 1, page_size: int = 20, source: str = "all",
|
||||
console: Optional[Console] = None) -> None:
|
||||
def do_browse(page: int = 1, page_size: int = 20, source: str = "all", console: Console | None = None) -> None:
|
||||
"""Browse all available skills across registries, paginated.
|
||||
|
||||
Official skills are always shown first, regardless of source filter.
|
||||
"""
|
||||
from tools.skills_hub import (
|
||||
GitHubAuth, create_source_router, OptionalSkillSource, SkillMeta,
|
||||
GitHubAuth,
|
||||
create_source_router,
|
||||
)
|
||||
|
||||
# Clamp page_size to safe range
|
||||
@@ -136,8 +138,7 @@ def do_browse(page: int = 1, page_size: int = 20, source: str = "all",
|
||||
# Collect results from all (or filtered) sources
|
||||
# Use empty query to get everything; per-source limits prevent overload
|
||||
_TRUST_RANK = {"builtin": 3, "trusted": 2, "community": 1}
|
||||
_PER_SOURCE_LIMIT = {"official": 100, "github": 100, "clawhub": 50,
|
||||
"claude-marketplace": 50, "lobehub": 50}
|
||||
_PER_SOURCE_LIMIT = {"official": 100, "github": 100, "clawhub": 50, "claude-marketplace": 50, "lobehub": 50}
|
||||
|
||||
all_results: list = []
|
||||
source_counts: dict = {}
|
||||
@@ -168,11 +169,13 @@ def do_browse(page: int = 1, page_size: int = 20, source: str = "all",
|
||||
deduped = list(seen.values())
|
||||
|
||||
# Sort: official first, then by trust level (desc), then alphabetically
|
||||
deduped.sort(key=lambda r: (
|
||||
-_TRUST_RANK.get(r.trust_level, 0),
|
||||
r.source != "official",
|
||||
r.name.lower(),
|
||||
))
|
||||
deduped.sort(
|
||||
key=lambda r: (
|
||||
-_TRUST_RANK.get(r.trust_level, 0),
|
||||
r.source != "official",
|
||||
r.name.lower(),
|
||||
)
|
||||
)
|
||||
|
||||
# Paginate
|
||||
total = len(deduped)
|
||||
@@ -187,8 +190,7 @@ def do_browse(page: int = 1, page_size: int = 20, source: str = "all",
|
||||
|
||||
# Build header
|
||||
source_label = f"— {source}" if source != "all" else "— all sources"
|
||||
c.print(f"\n[bold]Skills Hub — Browse {source_label}[/]"
|
||||
f" [dim]({total} skills, page {page}/{total_pages})[/]")
|
||||
c.print(f"\n[bold]Skills Hub — Browse {source_label}[/] [dim]({total} skills, page {page}/{total_pages})[/]")
|
||||
if official_count > 0 and page == 1:
|
||||
c.print(f"[bright_cyan]★ {official_count} official optional skill(s) from Nous Research[/]")
|
||||
c.print()
|
||||
@@ -202,8 +204,7 @@ def do_browse(page: int = 1, page_size: int = 20, source: str = "all",
|
||||
table.add_column("Trust", width=10)
|
||||
|
||||
for i, r in enumerate(page_items, start=start + 1):
|
||||
trust_style = {"builtin": "bright_cyan", "trusted": "green",
|
||||
"community": "yellow"}.get(r.trust_level, "dim")
|
||||
trust_style = {"builtin": "bright_cyan", "trusted": "green", "community": "yellow"}.get(r.trust_level, "dim")
|
||||
trust_label = "★ official" if r.source == "official" else r.trust_level
|
||||
|
||||
desc = r.description[:50]
|
||||
@@ -235,18 +236,22 @@ def do_browse(page: int = 1, page_size: int = 20, source: str = "all",
|
||||
parts = [f"{sid}: {ct}" for sid, ct in sorted(source_counts.items())]
|
||||
c.print(f" [dim]Sources: {', '.join(parts)}[/]")
|
||||
|
||||
c.print("[dim]Use: hermes skills inspect <identifier> to preview, "
|
||||
"hermes skills install <identifier> to install[/]\n")
|
||||
|
||||
|
||||
def do_install(identifier: str, category: str = "", force: bool = False,
|
||||
console: Optional[Console] = None) -> None:
|
||||
"""Fetch, quarantine, scan, confirm, and install a skill."""
|
||||
from tools.skills_hub import (
|
||||
GitHubAuth, create_source_router, ensure_hub_dirs,
|
||||
quarantine_bundle, install_from_quarantine, HubLockFile,
|
||||
c.print(
|
||||
"[dim]Use: hermes skills inspect <identifier> to preview, hermes skills install <identifier> to install[/]\n"
|
||||
)
|
||||
|
||||
|
||||
def do_install(identifier: str, category: str = "", force: bool = False, console: Console | None = None) -> None:
|
||||
"""Fetch, quarantine, scan, confirm, and install a skill."""
|
||||
from tools.skills_guard import format_scan_report, scan_skill, should_allow_install
|
||||
from tools.skills_hub import (
|
||||
GitHubAuth,
|
||||
HubLockFile,
|
||||
create_source_router,
|
||||
ensure_hub_dirs,
|
||||
install_from_quarantine,
|
||||
quarantine_bundle,
|
||||
)
|
||||
from tools.skills_guard import scan_skill, should_allow_install, format_scan_report
|
||||
|
||||
c = console or _console
|
||||
ensure_hub_dirs()
|
||||
@@ -304,33 +309,43 @@ def do_install(identifier: str, category: str = "", force: bool = False,
|
||||
# Clean up quarantine
|
||||
shutil.rmtree(q_path, ignore_errors=True)
|
||||
from tools.skills_hub import append_audit_log
|
||||
append_audit_log("BLOCKED", bundle.name, bundle.source,
|
||||
bundle.trust_level, result.verdict,
|
||||
f"{len(result.findings)}_findings")
|
||||
|
||||
append_audit_log(
|
||||
"BLOCKED",
|
||||
bundle.name,
|
||||
bundle.source,
|
||||
bundle.trust_level,
|
||||
result.verdict,
|
||||
f"{len(result.findings)}_findings",
|
||||
)
|
||||
return
|
||||
|
||||
# Confirm with user — show appropriate warning based on source
|
||||
if not force:
|
||||
c.print()
|
||||
if bundle.source == "official":
|
||||
c.print(Panel(
|
||||
"[bold bright_cyan]This is an official optional skill maintained by Nous Research.[/]\n\n"
|
||||
"It ships with hermes-agent but is not activated by default.\n"
|
||||
"Installing will copy it to your skills directory where the agent can use it.\n\n"
|
||||
f"Files will be at: [cyan]~/.hermes/skills/{category + '/' if category else ''}{bundle.name}/[/]",
|
||||
title="Official Skill",
|
||||
border_style="bright_cyan",
|
||||
))
|
||||
c.print(
|
||||
Panel(
|
||||
"[bold bright_cyan]This is an official optional skill maintained by Nous Research.[/]\n\n"
|
||||
"It ships with hermes-agent but is not activated by default.\n"
|
||||
"Installing will copy it to your skills directory where the agent can use it.\n\n"
|
||||
f"Files will be at: [cyan]~/.hermes/skills/{category + '/' if category else ''}{bundle.name}/[/]",
|
||||
title="Official Skill",
|
||||
border_style="bright_cyan",
|
||||
)
|
||||
)
|
||||
else:
|
||||
c.print(Panel(
|
||||
"[bold yellow]You are installing a third-party skill at your own risk.[/]\n\n"
|
||||
"External skills can contain instructions that influence agent behavior,\n"
|
||||
"shell commands, and scripts. Even after automated scanning, you should\n"
|
||||
"review the installed files before use.\n\n"
|
||||
f"Files will be at: [cyan]~/.hermes/skills/{category + '/' if category else ''}{bundle.name}/[/]",
|
||||
title="Disclaimer",
|
||||
border_style="yellow",
|
||||
))
|
||||
c.print(
|
||||
Panel(
|
||||
"[bold yellow]You are installing a third-party skill at your own risk.[/]\n\n"
|
||||
"External skills can contain instructions that influence agent behavior,\n"
|
||||
"shell commands, and scripts. Even after automated scanning, you should\n"
|
||||
"review the installed files before use.\n\n"
|
||||
f"Files will be at: [cyan]~/.hermes/skills/{category + '/' if category else ''}{bundle.name}/[/]",
|
||||
title="Disclaimer",
|
||||
border_style="yellow",
|
||||
)
|
||||
)
|
||||
c.print(f"[bold]Install '{bundle.name}'?[/]")
|
||||
try:
|
||||
answer = input("Confirm [y/N]: ").strip().lower()
|
||||
@@ -344,11 +359,12 @@ def do_install(identifier: str, category: str = "", force: bool = False,
|
||||
# Install
|
||||
install_dir = install_from_quarantine(q_path, bundle.name, category, bundle, result)
|
||||
from tools.skills_hub import SKILLS_DIR
|
||||
|
||||
c.print(f"[bold green]Installed:[/] {install_dir.relative_to(SKILLS_DIR)}")
|
||||
c.print(f"[dim]Files: {', '.join(bundle.files.keys())}[/]\n")
|
||||
|
||||
|
||||
def do_inspect(identifier: str, console: Optional[Console] = None) -> None:
|
||||
def do_inspect(identifier: str, console: Console | None = None) -> None:
|
||||
"""Preview a skill's SKILL.md content without installing."""
|
||||
from tools.skills_hub import GitHubAuth, create_source_router
|
||||
|
||||
@@ -406,7 +422,7 @@ def do_inspect(identifier: str, console: Optional[Console] = None) -> None:
|
||||
c.print()
|
||||
|
||||
|
||||
def do_list(source_filter: str = "all", console: Optional[Console] = None) -> None:
|
||||
def do_list(source_filter: str = "all", console: Console | None = None) -> None:
|
||||
"""List installed skills, distinguishing builtins from hub-installed."""
|
||||
from tools.skills_hub import HubLockFile, ensure_hub_dirs
|
||||
from tools.skills_tool import _find_all_skills
|
||||
@@ -446,14 +462,13 @@ def do_list(source_filter: str = "all", console: Optional[Console] = None) -> No
|
||||
table.add_row(name, category, source_display, f"[{trust_style}]{trust_label}[/]")
|
||||
|
||||
c.print(table)
|
||||
c.print(f"[dim]{len(hub_installed)} hub-installed, "
|
||||
f"{len(all_skills) - len(hub_installed)} builtin[/]\n")
|
||||
c.print(f"[dim]{len(hub_installed)} hub-installed, {len(all_skills) - len(hub_installed)} builtin[/]\n")
|
||||
|
||||
|
||||
def do_audit(name: Optional[str] = None, console: Optional[Console] = None) -> None:
|
||||
def do_audit(name: str | None = None, console: Console | None = None) -> None:
|
||||
"""Re-run security scan on installed hub skills."""
|
||||
from tools.skills_hub import HubLockFile, SKILLS_DIR
|
||||
from tools.skills_guard import scan_skill, format_scan_report
|
||||
from tools.skills_guard import format_scan_report, scan_skill
|
||||
from tools.skills_hub import SKILLS_DIR, HubLockFile
|
||||
|
||||
c = console or _console
|
||||
lock = HubLockFile()
|
||||
@@ -483,7 +498,7 @@ def do_audit(name: Optional[str] = None, console: Optional[Console] = None) -> N
|
||||
c.print()
|
||||
|
||||
|
||||
def do_uninstall(name: str, console: Optional[Console] = None) -> None:
|
||||
def do_uninstall(name: str, console: Console | None = None) -> None:
|
||||
"""Remove a hub-installed skill with confirmation."""
|
||||
from tools.skills_hub import uninstall_skill
|
||||
|
||||
@@ -505,7 +520,7 @@ def do_uninstall(name: str, console: Optional[Console] = None) -> None:
|
||||
c.print(f"[bold red]Error:[/] {msg}\n")
|
||||
|
||||
|
||||
def do_tap(action: str, repo: str = "", console: Optional[Console] = None) -> None:
|
||||
def do_tap(action: str, repo: str = "", console: Console | None = None) -> None:
|
||||
"""Manage taps (custom GitHub repo sources)."""
|
||||
from tools.skills_hub import TapsManager
|
||||
|
||||
@@ -547,11 +562,10 @@ def do_tap(action: str, repo: str = "", console: Optional[Console] = None) -> No
|
||||
c.print(f"[bold red]Unknown tap action:[/] {action}. Use: list, add, remove\n")
|
||||
|
||||
|
||||
def do_publish(skill_path: str, target: str = "github", repo: str = "",
|
||||
console: Optional[Console] = None) -> None:
|
||||
def do_publish(skill_path: str, target: str = "github", repo: str = "", console: Console | None = None) -> None:
|
||||
"""Publish a local skill to a registry (GitHub PR or ClawHub submission)."""
|
||||
from tools.skills_hub import GitHubAuth, SKILLS_DIR
|
||||
from tools.skills_guard import scan_skill, format_scan_report
|
||||
from tools.skills_guard import format_scan_report, scan_skill
|
||||
from tools.skills_hub import SKILLS_DIR, GitHubAuth
|
||||
|
||||
c = console or _console
|
||||
path = Path(skill_path)
|
||||
@@ -565,14 +579,16 @@ def do_publish(skill_path: str, target: str = "github", repo: str = "",
|
||||
|
||||
# Validate the skill
|
||||
import yaml
|
||||
|
||||
skill_md = (path / "SKILL.md").read_text(encoding="utf-8")
|
||||
fm = {}
|
||||
if skill_md.startswith("---"):
|
||||
import re
|
||||
match = re.search(r'\n---\s*\n', skill_md[3:])
|
||||
|
||||
match = re.search(r"\n---\s*\n", skill_md[3:])
|
||||
if match:
|
||||
try:
|
||||
fm = yaml.safe_load(skill_md[3:match.start() + 3]) or {}
|
||||
fm = yaml.safe_load(skill_md[3 : match.start() + 3]) or {}
|
||||
except yaml.YAMLError:
|
||||
pass
|
||||
|
||||
@@ -592,14 +608,18 @@ def do_publish(skill_path: str, target: str = "github", repo: str = "",
|
||||
|
||||
if target == "github":
|
||||
if not repo:
|
||||
c.print("[bold red]Error:[/] --repo required for GitHub publish.\n"
|
||||
"Usage: hermes skills publish <path> --to github --repo owner/repo\n")
|
||||
c.print(
|
||||
"[bold red]Error:[/] --repo required for GitHub publish.\n"
|
||||
"Usage: hermes skills publish <path> --to github --repo owner/repo\n"
|
||||
)
|
||||
return
|
||||
|
||||
auth = GitHubAuth()
|
||||
if not auth.is_authenticated():
|
||||
c.print("[bold red]Error:[/] GitHub authentication required.\n"
|
||||
"Set GITHUB_TOKEN in ~/.hermes/.env or run 'gh auth login'.\n")
|
||||
c.print(
|
||||
"[bold red]Error:[/] GitHub authentication required.\n"
|
||||
"Set GITHUB_TOKEN in ~/.hermes/.env or run 'gh auth login'.\n"
|
||||
)
|
||||
return
|
||||
|
||||
c.print(f"[bold]Publishing '{name}' to {repo}...[/]")
|
||||
@@ -610,14 +630,12 @@ def do_publish(skill_path: str, target: str = "github", repo: str = "",
|
||||
c.print(f"[bold red]Error:[/] {msg}\n")
|
||||
|
||||
elif target == "clawhub":
|
||||
c.print("[yellow]ClawHub publishing is not yet supported. "
|
||||
"Submit manually at https://clawhub.ai/submit[/]\n")
|
||||
c.print("[yellow]ClawHub publishing is not yet supported. Submit manually at https://clawhub.ai/submit[/]\n")
|
||||
else:
|
||||
c.print(f"[bold red]Unknown target:[/] {target}. Use 'github' or 'clawhub'.\n")
|
||||
|
||||
|
||||
def _github_publish(skill_path: Path, skill_name: str, target_repo: str,
|
||||
auth) -> tuple:
|
||||
def _github_publish(skill_path: Path, skill_name: str, target_repo: str, auth) -> tuple:
|
||||
"""Create a PR to a GitHub repo with the skill. Returns (success, message)."""
|
||||
import httpx
|
||||
|
||||
@@ -627,7 +645,8 @@ def _github_publish(skill_path: Path, skill_name: str, target_repo: str,
|
||||
try:
|
||||
resp = httpx.post(
|
||||
f"https://api.github.com/repos/{target_repo}/forks",
|
||||
headers=headers, timeout=30,
|
||||
headers=headers,
|
||||
timeout=30,
|
||||
)
|
||||
if resp.status_code in (200, 202):
|
||||
fork = resp.json()
|
||||
@@ -643,7 +662,8 @@ def _github_publish(skill_path: Path, skill_name: str, target_repo: str,
|
||||
try:
|
||||
resp = httpx.get(
|
||||
f"https://api.github.com/repos/{target_repo}",
|
||||
headers=headers, timeout=15,
|
||||
headers=headers,
|
||||
timeout=15,
|
||||
)
|
||||
default_branch = resp.json().get("default_branch", "main")
|
||||
except Exception:
|
||||
@@ -653,7 +673,8 @@ def _github_publish(skill_path: Path, skill_name: str, target_repo: str,
|
||||
try:
|
||||
resp = httpx.get(
|
||||
f"https://api.github.com/repos/{fork_repo}/git/refs/heads/{default_branch}",
|
||||
headers=headers, timeout=15,
|
||||
headers=headers,
|
||||
timeout=15,
|
||||
)
|
||||
base_sha = resp.json()["object"]["sha"]
|
||||
except Exception as e:
|
||||
@@ -664,7 +685,8 @@ def _github_publish(skill_path: Path, skill_name: str, target_repo: str,
|
||||
try:
|
||||
httpx.post(
|
||||
f"https://api.github.com/repos/{fork_repo}/git/refs",
|
||||
headers=headers, timeout=15,
|
||||
headers=headers,
|
||||
timeout=15,
|
||||
json={"ref": f"refs/heads/{branch_name}", "sha": base_sha},
|
||||
)
|
||||
except Exception as e:
|
||||
@@ -678,10 +700,12 @@ def _github_publish(skill_path: Path, skill_name: str, target_repo: str,
|
||||
upload_path = f"skills/{skill_name}/{rel}"
|
||||
try:
|
||||
import base64
|
||||
|
||||
content_b64 = base64.b64encode(f.read_bytes()).decode()
|
||||
httpx.put(
|
||||
f"https://api.github.com/repos/{fork_repo}/contents/{upload_path}",
|
||||
headers=headers, timeout=15,
|
||||
headers=headers,
|
||||
timeout=15,
|
||||
json={
|
||||
"message": f"Add {skill_name} skill: {rel}",
|
||||
"content": content_b64,
|
||||
@@ -695,11 +719,12 @@ def _github_publish(skill_path: Path, skill_name: str, target_repo: str,
|
||||
try:
|
||||
resp = httpx.post(
|
||||
f"https://api.github.com/repos/{target_repo}/pulls",
|
||||
headers=headers, timeout=15,
|
||||
headers=headers,
|
||||
timeout=15,
|
||||
json={
|
||||
"title": f"Add skill: {skill_name}",
|
||||
"body": f"Submitting the `{skill_name}` skill via Hermes Skills Hub.\n\n"
|
||||
f"This skill was scanned by the Hermes Skills Guard before submission.",
|
||||
f"This skill was scanned by the Hermes Skills Guard before submission.",
|
||||
"head": f"{fork_repo.split('/')[0]}:{branch_name}",
|
||||
"base": default_branch,
|
||||
},
|
||||
@@ -713,7 +738,7 @@ def _github_publish(skill_path: Path, skill_name: str, target_repo: str,
|
||||
return False, f"Network error creating PR: {e}"
|
||||
|
||||
|
||||
def do_snapshot_export(output_path: str, console: Optional[Console] = None) -> None:
|
||||
def do_snapshot_export(output_path: str, console: Console | None = None) -> None:
|
||||
"""Export current hub skill configuration to a portable JSON file."""
|
||||
from tools.skills_hub import HubLockFile, TapsManager
|
||||
|
||||
@@ -726,16 +751,15 @@ def do_snapshot_export(output_path: str, console: Optional[Console] = None) -> N
|
||||
|
||||
snapshot = {
|
||||
"hermes_version": "0.1.0",
|
||||
"exported_at": __import__("datetime").datetime.now(
|
||||
__import__("datetime").timezone.utc
|
||||
).isoformat(),
|
||||
"exported_at": __import__("datetime").datetime.now(__import__("datetime").timezone.utc).isoformat(),
|
||||
"skills": [
|
||||
{
|
||||
"name": entry["name"],
|
||||
"source": entry.get("source", ""),
|
||||
"identifier": entry.get("identifier", ""),
|
||||
"category": str(Path(entry.get("install_path", "")).parent)
|
||||
if "/" in entry.get("install_path", "") else "",
|
||||
if "/" in entry.get("install_path", "")
|
||||
else "",
|
||||
}
|
||||
for entry in installed
|
||||
],
|
||||
@@ -748,8 +772,7 @@ def do_snapshot_export(output_path: str, console: Optional[Console] = None) -> N
|
||||
c.print(f"[dim]{len(installed)} skill(s), {len(tap_list)} tap(s)[/]\n")
|
||||
|
||||
|
||||
def do_snapshot_import(input_path: str, force: bool = False,
|
||||
console: Optional[Console] = None) -> None:
|
||||
def do_snapshot_import(input_path: str, force: bool = False, console: Console | None = None) -> None:
|
||||
"""Re-install skills from a snapshot file."""
|
||||
from tools.skills_hub import TapsManager
|
||||
|
||||
@@ -799,6 +822,7 @@ def do_snapshot_import(input_path: str, force: bool = False,
|
||||
# CLI argparse entry point
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def skills_command(args) -> None:
|
||||
"""Router for `hermes skills <subcommand>` — called from hermes_cli/main.py."""
|
||||
action = getattr(args, "skills_action", None)
|
||||
@@ -839,7 +863,9 @@ def skills_command(args) -> None:
|
||||
return
|
||||
do_tap(tap_action, repo=repo)
|
||||
else:
|
||||
_console.print("Usage: hermes skills [browse|search|install|inspect|list|audit|uninstall|publish|snapshot|tap]\n")
|
||||
_console.print(
|
||||
"Usage: hermes skills [browse|search|install|inspect|list|audit|uninstall|publish|snapshot|tap]\n"
|
||||
)
|
||||
_console.print("Run 'hermes skills <command> --help' for details.\n")
|
||||
|
||||
|
||||
@@ -847,7 +873,8 @@ def skills_command(args) -> None:
|
||||
# Slash command entry point (/skills in chat)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def handle_skills_slash(cmd: str, console: Optional[Console] = None) -> None:
|
||||
|
||||
def handle_skills_slash(cmd: str, console: Console | None = None) -> None:
|
||||
"""
|
||||
Parse and dispatch `/skills <subcommand> [args]` from the chat interface.
|
||||
|
||||
@@ -1008,17 +1035,19 @@ def handle_skills_slash(cmd: str, console: Optional[Console] = None) -> None:
|
||||
|
||||
def _print_skills_help(console: Console) -> None:
|
||||
"""Print help for the /skills slash command."""
|
||||
console.print(Panel(
|
||||
"[bold]Skills Hub Commands:[/]\n\n"
|
||||
" [cyan]browse[/] [--source official] Browse all available skills (paginated)\n"
|
||||
" [cyan]search[/] <query> Search registries for skills\n"
|
||||
" [cyan]install[/] <identifier> Install a skill (with security scan)\n"
|
||||
" [cyan]inspect[/] <identifier> Preview a skill without installing\n"
|
||||
" [cyan]list[/] [--source hub|builtin] List installed skills\n"
|
||||
" [cyan]audit[/] [name] Re-scan hub skills for security\n"
|
||||
" [cyan]uninstall[/] <name> Remove a hub-installed skill\n"
|
||||
" [cyan]publish[/] <path> --repo <r> Publish a skill to GitHub via PR\n"
|
||||
" [cyan]snapshot[/] export|import Export/import skill configurations\n"
|
||||
" [cyan]tap[/] list|add|remove Manage skill sources\n",
|
||||
title="/skills",
|
||||
))
|
||||
console.print(
|
||||
Panel(
|
||||
"[bold]Skills Hub Commands:[/]\n\n"
|
||||
" [cyan]browse[/] [--source official] Browse all available skills (paginated)\n"
|
||||
" [cyan]search[/] <query> Search registries for skills\n"
|
||||
" [cyan]install[/] <identifier> Install a skill (with security scan)\n"
|
||||
" [cyan]inspect[/] <identifier> Preview a skill without installing\n"
|
||||
" [cyan]list[/] [--source hub|builtin] List installed skills\n"
|
||||
" [cyan]audit[/] [name] Re-scan hub skills for security\n"
|
||||
" [cyan]uninstall[/] <name> Remove a hub-installed skill\n"
|
||||
" [cyan]publish[/] <path> --repo <r> Publish a skill to GitHub via PR\n"
|
||||
" [cyan]snapshot[/] export|import Export/import skill configurations\n"
|
||||
" [cyan]tap[/] list|add|remove Manage skill sources\n",
|
||||
title="/skills",
|
||||
)
|
||||
)
|
||||
|
||||
@@ -5,21 +5,25 @@ Shows the status of all Hermes Agent components.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
PROJECT_ROOT = Path(__file__).parent.parent.resolve()
|
||||
|
||||
from datetime import UTC
|
||||
|
||||
from hermes_cli.colors import Colors, color
|
||||
from hermes_cli.config import get_env_path, get_env_value
|
||||
from hermes_constants import OPENROUTER_MODELS_URL
|
||||
|
||||
|
||||
def check_mark(ok: bool) -> str:
|
||||
if ok:
|
||||
return color("✓", Colors.GREEN)
|
||||
return color("✗", Colors.RED)
|
||||
|
||||
|
||||
def redact_key(key: str) -> str:
|
||||
"""Redact an API key for display."""
|
||||
if not key:
|
||||
@@ -33,7 +37,8 @@ def _format_iso_timestamp(value) -> str:
|
||||
"""Format ISO timestamps for status output, converting to local timezone."""
|
||||
if not value or not isinstance(value, str):
|
||||
return "(unknown)"
|
||||
from datetime import datetime, timezone
|
||||
from datetime import datetime
|
||||
|
||||
text = value.strip()
|
||||
if not text:
|
||||
return "(unknown)"
|
||||
@@ -42,7 +47,7 @@ def _format_iso_timestamp(value) -> str:
|
||||
try:
|
||||
parsed = datetime.fromisoformat(text)
|
||||
if parsed.tzinfo is None:
|
||||
parsed = parsed.replace(tzinfo=timezone.utc)
|
||||
parsed = parsed.replace(tzinfo=UTC)
|
||||
except Exception:
|
||||
return value
|
||||
return parsed.astimezone().strftime("%Y-%m-%d %H:%M:%S %Z")
|
||||
@@ -50,14 +55,14 @@ def _format_iso_timestamp(value) -> str:
|
||||
|
||||
def show_status(args):
|
||||
"""Show status of all Hermes Agent components."""
|
||||
show_all = getattr(args, 'all', False)
|
||||
deep = getattr(args, 'deep', False)
|
||||
|
||||
show_all = getattr(args, "all", False)
|
||||
deep = getattr(args, "deep", False)
|
||||
|
||||
print()
|
||||
print(color("┌─────────────────────────────────────────────────────────┐", Colors.CYAN))
|
||||
print(color("│ ⚕ Hermes Agent Status │", Colors.CYAN))
|
||||
print(color("└─────────────────────────────────────────────────────────┘", Colors.CYAN))
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Environment
|
||||
# =========================================================================
|
||||
@@ -65,19 +70,19 @@ def show_status(args):
|
||||
print(color("◆ Environment", Colors.CYAN, Colors.BOLD))
|
||||
print(f" Project: {PROJECT_ROOT}")
|
||||
print(f" Python: {sys.version.split()[0]}")
|
||||
|
||||
|
||||
env_path = get_env_path()
|
||||
print(f" .env file: {check_mark(env_path.exists())} {'exists' if env_path.exists() else 'not found'}")
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# API Keys
|
||||
# =========================================================================
|
||||
print()
|
||||
print(color("◆ API Keys", Colors.CYAN, Colors.BOLD))
|
||||
|
||||
|
||||
keys = {
|
||||
"OpenRouter": "OPENROUTER_API_KEY",
|
||||
"Anthropic": "ANTHROPIC_API_KEY",
|
||||
"Anthropic": "ANTHROPIC_API_KEY",
|
||||
"OpenAI": "OPENAI_API_KEY",
|
||||
"Z.AI/GLM": "GLM_API_KEY",
|
||||
"Kimi": "KIMI_API_KEY",
|
||||
@@ -91,7 +96,7 @@ def show_status(args):
|
||||
"ElevenLabs": "ELEVENLABS_API_KEY",
|
||||
"GitHub": "GITHUB_TOKEN",
|
||||
}
|
||||
|
||||
|
||||
for name, env_var in keys.items():
|
||||
value = get_env_value(env_var) or ""
|
||||
has_key = bool(value)
|
||||
@@ -105,7 +110,8 @@ def show_status(args):
|
||||
print(color("◆ Auth Providers", Colors.CYAN, Colors.BOLD))
|
||||
|
||||
try:
|
||||
from hermes_cli.auth import get_nous_auth_status, get_codex_auth_status
|
||||
from hermes_cli.auth import get_codex_auth_status, get_nous_auth_status
|
||||
|
||||
nous_status = get_nous_auth_status()
|
||||
codex_status = get_codex_auth_status()
|
||||
except Exception:
|
||||
@@ -148,10 +154,10 @@ def show_status(args):
|
||||
print(color("◆ API-Key Providers", Colors.CYAN, Colors.BOLD))
|
||||
|
||||
apikey_providers = {
|
||||
"Z.AI / GLM": ("GLM_API_KEY", "ZAI_API_KEY", "Z_AI_API_KEY"),
|
||||
"Kimi / Moonshot": ("KIMI_API_KEY",),
|
||||
"MiniMax": ("MINIMAX_API_KEY",),
|
||||
"MiniMax (China)": ("MINIMAX_CN_API_KEY",),
|
||||
"Z.AI / GLM": ("GLM_API_KEY", "ZAI_API_KEY", "Z_AI_API_KEY"),
|
||||
"Kimi / Moonshot": ("KIMI_API_KEY",),
|
||||
"MiniMax": ("MINIMAX_API_KEY",),
|
||||
"MiniMax (China)": ("MINIMAX_CN_API_KEY",),
|
||||
}
|
||||
for pname, env_vars in apikey_providers.items():
|
||||
key_val = ""
|
||||
@@ -168,19 +174,20 @@ def show_status(args):
|
||||
# =========================================================================
|
||||
print()
|
||||
print(color("◆ Terminal Backend", Colors.CYAN, Colors.BOLD))
|
||||
|
||||
|
||||
terminal_env = os.getenv("TERMINAL_ENV", "")
|
||||
if not terminal_env:
|
||||
# Fall back to config file value when env var isn't set
|
||||
# (hermes status doesn't go through cli.py's config loading)
|
||||
try:
|
||||
from hermes_cli.config import load_config
|
||||
|
||||
_cfg = load_config()
|
||||
terminal_env = _cfg.get("terminal", {}).get("backend", "local")
|
||||
except Exception:
|
||||
terminal_env = "local"
|
||||
print(f" Backend: {terminal_env}")
|
||||
|
||||
|
||||
if terminal_env == "ssh":
|
||||
ssh_host = os.getenv("TERMINAL_SSH_HOST", "")
|
||||
ssh_user = os.getenv("TERMINAL_SSH_USER", "")
|
||||
@@ -192,16 +199,16 @@ def show_status(args):
|
||||
elif terminal_env == "daytona":
|
||||
daytona_image = os.getenv("TERMINAL_DAYTONA_IMAGE", "nikolaik/python-nodejs:python3.11-nodejs20")
|
||||
print(f" Daytona Image: {daytona_image}")
|
||||
|
||||
|
||||
sudo_password = os.getenv("SUDO_PASSWORD", "")
|
||||
print(f" Sudo: {check_mark(bool(sudo_password))} {'enabled' if sudo_password else 'disabled'}")
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Messaging Platforms
|
||||
# =========================================================================
|
||||
print()
|
||||
print(color("◆ Messaging Platforms", Colors.CYAN, Colors.BOLD))
|
||||
|
||||
|
||||
platforms = {
|
||||
"Telegram": ("TELEGRAM_BOT_TOKEN", "TELEGRAM_HOME_CHANNEL"),
|
||||
"Discord": ("DISCORD_BOT_TOKEN", "DISCORD_HOME_CHANNEL"),
|
||||
@@ -209,59 +216,52 @@ def show_status(args):
|
||||
"Signal": ("SIGNAL_HTTP_URL", "SIGNAL_HOME_CHANNEL"),
|
||||
"Slack": ("SLACK_BOT_TOKEN", None),
|
||||
}
|
||||
|
||||
|
||||
for name, (token_var, home_var) in platforms.items():
|
||||
token = os.getenv(token_var, "")
|
||||
has_token = bool(token)
|
||||
|
||||
|
||||
home_channel = ""
|
||||
if home_var:
|
||||
home_channel = os.getenv(home_var, "")
|
||||
|
||||
|
||||
status = "configured" if has_token else "not configured"
|
||||
if home_channel:
|
||||
status += f" (home: {home_channel})"
|
||||
|
||||
|
||||
print(f" {name:<12} {check_mark(has_token)} {status}")
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Gateway Status
|
||||
# =========================================================================
|
||||
print()
|
||||
print(color("◆ Gateway Service", Colors.CYAN, Colors.BOLD))
|
||||
|
||||
if sys.platform.startswith('linux'):
|
||||
result = subprocess.run(
|
||||
["systemctl", "--user", "is-active", "hermes-gateway"],
|
||||
capture_output=True,
|
||||
text=True
|
||||
)
|
||||
|
||||
if sys.platform.startswith("linux"):
|
||||
result = subprocess.run(["systemctl", "--user", "is-active", "hermes-gateway"], capture_output=True, text=True)
|
||||
is_active = result.stdout.strip() == "active"
|
||||
print(f" Status: {check_mark(is_active)} {'running' if is_active else 'stopped'}")
|
||||
print(f" Manager: systemd (user)")
|
||||
|
||||
elif sys.platform == 'darwin':
|
||||
result = subprocess.run(
|
||||
["launchctl", "list", "ai.hermes.gateway"],
|
||||
capture_output=True,
|
||||
text=True
|
||||
)
|
||||
print(" Manager: systemd (user)")
|
||||
|
||||
elif sys.platform == "darwin":
|
||||
result = subprocess.run(["launchctl", "list", "ai.hermes.gateway"], capture_output=True, text=True)
|
||||
is_loaded = result.returncode == 0
|
||||
print(f" Status: {check_mark(is_loaded)} {'loaded' if is_loaded else 'not loaded'}")
|
||||
print(f" Manager: launchd")
|
||||
print(" Manager: launchd")
|
||||
else:
|
||||
print(f" Status: {color('N/A', Colors.DIM)}")
|
||||
print(f" Manager: (not supported on this platform)")
|
||||
|
||||
print(" Manager: (not supported on this platform)")
|
||||
|
||||
# =========================================================================
|
||||
# Cron Jobs
|
||||
# =========================================================================
|
||||
print()
|
||||
print(color("◆ Scheduled Jobs", Colors.CYAN, Colors.BOLD))
|
||||
|
||||
|
||||
jobs_file = Path.home() / ".hermes" / "cron" / "jobs.json"
|
||||
if jobs_file.exists():
|
||||
import json
|
||||
|
||||
try:
|
||||
with open(jobs_file) as f:
|
||||
data = json.load(f)
|
||||
@@ -269,56 +269,57 @@ def show_status(args):
|
||||
enabled_jobs = [j for j in jobs if j.get("enabled", True)]
|
||||
print(f" Jobs: {len(enabled_jobs)} active, {len(jobs)} total")
|
||||
except Exception:
|
||||
print(f" Jobs: (error reading jobs file)")
|
||||
print(" Jobs: (error reading jobs file)")
|
||||
else:
|
||||
print(f" Jobs: 0")
|
||||
|
||||
print(" Jobs: 0")
|
||||
|
||||
# =========================================================================
|
||||
# Sessions
|
||||
# =========================================================================
|
||||
print()
|
||||
print(color("◆ Sessions", Colors.CYAN, Colors.BOLD))
|
||||
|
||||
|
||||
sessions_file = Path.home() / ".hermes" / "sessions" / "sessions.json"
|
||||
if sessions_file.exists():
|
||||
import json
|
||||
|
||||
try:
|
||||
with open(sessions_file) as f:
|
||||
data = json.load(f)
|
||||
print(f" Active: {len(data)} session(s)")
|
||||
except Exception:
|
||||
print(f" Active: (error reading sessions file)")
|
||||
print(" Active: (error reading sessions file)")
|
||||
else:
|
||||
print(f" Active: 0")
|
||||
|
||||
print(" Active: 0")
|
||||
|
||||
# =========================================================================
|
||||
# Deep checks
|
||||
# =========================================================================
|
||||
if deep:
|
||||
print()
|
||||
print(color("◆ Deep Checks", Colors.CYAN, Colors.BOLD))
|
||||
|
||||
|
||||
# Check OpenRouter connectivity
|
||||
openrouter_key = os.getenv("OPENROUTER_API_KEY", "")
|
||||
if openrouter_key:
|
||||
try:
|
||||
import httpx
|
||||
|
||||
response = httpx.get(
|
||||
OPENROUTER_MODELS_URL,
|
||||
headers={"Authorization": f"Bearer {openrouter_key}"},
|
||||
timeout=10
|
||||
OPENROUTER_MODELS_URL, headers={"Authorization": f"Bearer {openrouter_key}"}, timeout=10
|
||||
)
|
||||
ok = response.status_code == 200
|
||||
print(f" OpenRouter: {check_mark(ok)} {'reachable' if ok else f'error ({response.status_code})'}")
|
||||
except Exception as e:
|
||||
print(f" OpenRouter: {check_mark(False)} error: {e}")
|
||||
|
||||
|
||||
# Check gateway port
|
||||
try:
|
||||
import socket
|
||||
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
sock.settimeout(1)
|
||||
result = sock.connect_ex(('127.0.0.1', 18789))
|
||||
result = sock.connect_ex(("127.0.0.1", 18789))
|
||||
sock.close()
|
||||
# Port in use = gateway likely running
|
||||
port_in_use = result == 0
|
||||
@@ -326,7 +327,7 @@ def show_status(args):
|
||||
print(f" Port 18789: {'in use' if port_in_use else 'available'}")
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
|
||||
print()
|
||||
print(color("─" * 60, Colors.DIM))
|
||||
print(color(" Run 'hermes doctor' for detailed diagnostics", Colors.DIM))
|
||||
|
||||
@@ -11,33 +11,37 @@ the `platform_toolsets` key.
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Set
|
||||
|
||||
import os
|
||||
|
||||
from hermes_cli.config import (
|
||||
load_config, save_config, get_env_value, save_env_value,
|
||||
get_hermes_home,
|
||||
)
|
||||
from hermes_cli.colors import Colors, color
|
||||
from hermes_cli.config import (
|
||||
get_env_value,
|
||||
load_config,
|
||||
save_config,
|
||||
save_env_value,
|
||||
)
|
||||
|
||||
PROJECT_ROOT = Path(__file__).parent.parent.resolve()
|
||||
|
||||
|
||||
# ─── UI Helpers (shared with setup.py) ────────────────────────────────────────
|
||||
|
||||
|
||||
def _print_info(text: str):
|
||||
print(color(f" {text}", Colors.DIM))
|
||||
|
||||
|
||||
def _print_success(text: str):
|
||||
print(color(f"✓ {text}", Colors.GREEN))
|
||||
|
||||
|
||||
def _print_warning(text: str):
|
||||
print(color(f"⚠ {text}", Colors.YELLOW))
|
||||
|
||||
|
||||
def _print_error(text: str):
|
||||
print(color(f"✗ {text}", Colors.RED))
|
||||
|
||||
|
||||
def _prompt(question: str, default: str = None, password: bool = False) -> str:
|
||||
if default:
|
||||
display = f"{question} [{default}]: "
|
||||
@@ -46,6 +50,7 @@ def _prompt(question: str, default: str = None, password: bool = False) -> str:
|
||||
try:
|
||||
if password:
|
||||
import getpass
|
||||
|
||||
value = getpass.getpass(color(display, Colors.YELLOW))
|
||||
else:
|
||||
value = input(color(display, Colors.YELLOW))
|
||||
@@ -54,6 +59,7 @@ def _prompt(question: str, default: str = None, password: bool = False) -> str:
|
||||
print()
|
||||
return default or ""
|
||||
|
||||
|
||||
def _prompt_yes_no(question: str, default: bool = True) -> bool:
|
||||
default_str = "Y/n" if default else "y/N"
|
||||
while True:
|
||||
@@ -64,9 +70,9 @@ def _prompt_yes_no(question: str, default: bool = True) -> bool:
|
||||
return default
|
||||
if not value:
|
||||
return default
|
||||
if value in ('y', 'yes'):
|
||||
if value in ("y", "yes"):
|
||||
return True
|
||||
if value in ('n', 'no'):
|
||||
if value in ("n", "no"):
|
||||
return False
|
||||
|
||||
|
||||
@@ -76,24 +82,24 @@ def _prompt_yes_no(question: str, default: bool = True) -> bool:
|
||||
# Each entry: (toolset_name, label, description)
|
||||
# These map to keys in toolsets.py TOOLSETS dict.
|
||||
CONFIGURABLE_TOOLSETS = [
|
||||
("web", "🔍 Web Search & Scraping", "web_search, web_extract"),
|
||||
("browser", "🌐 Browser Automation", "navigate, click, type, scroll"),
|
||||
("terminal", "💻 Terminal & Processes", "terminal, process"),
|
||||
("file", "📁 File Operations", "read, write, patch, search"),
|
||||
("code_execution", "⚡ Code Execution", "execute_code"),
|
||||
("vision", "👁️ Vision / Image Analysis", "vision_analyze"),
|
||||
("image_gen", "🎨 Image Generation", "image_generate"),
|
||||
("moa", "🧠 Mixture of Agents", "mixture_of_agents"),
|
||||
("tts", "🔊 Text-to-Speech", "text_to_speech"),
|
||||
("skills", "📚 Skills", "list, view, manage"),
|
||||
("todo", "📋 Task Planning", "todo"),
|
||||
("memory", "💾 Memory", "persistent memory across sessions"),
|
||||
("session_search", "🔎 Session Search", "search past conversations"),
|
||||
("clarify", "❓ Clarifying Questions", "clarify"),
|
||||
("delegation", "👥 Task Delegation", "delegate_task"),
|
||||
("cronjob", "⏰ Cron Jobs", "schedule, list, remove"),
|
||||
("rl", "🧪 RL Training", "Tinker-Atropos training tools"),
|
||||
("homeassistant", "🏠 Home Assistant", "smart home device control"),
|
||||
("web", "🔍 Web Search & Scraping", "web_search, web_extract"),
|
||||
("browser", "🌐 Browser Automation", "navigate, click, type, scroll"),
|
||||
("terminal", "💻 Terminal & Processes", "terminal, process"),
|
||||
("file", "📁 File Operations", "read, write, patch, search"),
|
||||
("code_execution", "⚡ Code Execution", "execute_code"),
|
||||
("vision", "👁️ Vision / Image Analysis", "vision_analyze"),
|
||||
("image_gen", "🎨 Image Generation", "image_generate"),
|
||||
("moa", "🧠 Mixture of Agents", "mixture_of_agents"),
|
||||
("tts", "🔊 Text-to-Speech", "text_to_speech"),
|
||||
("skills", "📚 Skills", "list, view, manage"),
|
||||
("todo", "📋 Task Planning", "todo"),
|
||||
("memory", "💾 Memory", "persistent memory across sessions"),
|
||||
("session_search", "🔎 Session Search", "search past conversations"),
|
||||
("clarify", "❓ Clarifying Questions", "clarify"),
|
||||
("delegation", "👥 Task Delegation", "delegate_task"),
|
||||
("cronjob", "⏰ Cron Jobs", "schedule, list, remove"),
|
||||
("rl", "🧪 RL Training", "Tinker-Atropos training tools"),
|
||||
("homeassistant", "🏠 Home Assistant", "smart home device control"),
|
||||
]
|
||||
|
||||
# Toolsets that are OFF by default for new installs.
|
||||
@@ -103,11 +109,11 @@ _DEFAULT_OFF_TOOLSETS = {"moa", "homeassistant", "rl"}
|
||||
|
||||
# Platform display config
|
||||
PLATFORMS = {
|
||||
"cli": {"label": "🖥️ CLI", "default_toolset": "hermes-cli"},
|
||||
"telegram": {"label": "📱 Telegram", "default_toolset": "hermes-telegram"},
|
||||
"discord": {"label": "💬 Discord", "default_toolset": "hermes-discord"},
|
||||
"slack": {"label": "💼 Slack", "default_toolset": "hermes-slack"},
|
||||
"whatsapp": {"label": "📱 WhatsApp", "default_toolset": "hermes-whatsapp"},
|
||||
"cli": {"label": "🖥️ CLI", "default_toolset": "hermes-cli"},
|
||||
"telegram": {"label": "📱 Telegram", "default_toolset": "hermes-telegram"},
|
||||
"discord": {"label": "💬 Discord", "default_toolset": "hermes-discord"},
|
||||
"slack": {"label": "💼 Slack", "default_toolset": "hermes-slack"},
|
||||
"whatsapp": {"label": "📱 WhatsApp", "default_toolset": "hermes-whatsapp"},
|
||||
}
|
||||
|
||||
|
||||
@@ -131,7 +137,11 @@ TOOL_CATEGORIES = {
|
||||
"name": "OpenAI TTS",
|
||||
"tag": "Premium - high quality voices",
|
||||
"env_vars": [
|
||||
{"key": "VOICE_TOOLS_OPENAI_KEY", "prompt": "OpenAI API key", "url": "https://platform.openai.com/api-keys"},
|
||||
{
|
||||
"key": "VOICE_TOOLS_OPENAI_KEY",
|
||||
"prompt": "OpenAI API key",
|
||||
"url": "https://platform.openai.com/api-keys",
|
||||
},
|
||||
],
|
||||
"tts_provider": "openai",
|
||||
},
|
||||
@@ -139,7 +149,11 @@ TOOL_CATEGORIES = {
|
||||
"name": "ElevenLabs",
|
||||
"tag": "Premium - most natural voices",
|
||||
"env_vars": [
|
||||
{"key": "ELEVENLABS_API_KEY", "prompt": "ElevenLabs API key", "url": "https://elevenlabs.io/app/settings/api-keys"},
|
||||
{
|
||||
"key": "ELEVENLABS_API_KEY",
|
||||
"prompt": "ElevenLabs API key",
|
||||
"url": "https://elevenlabs.io/app/settings/api-keys",
|
||||
},
|
||||
],
|
||||
"tts_provider": "elevenlabs",
|
||||
},
|
||||
@@ -224,7 +238,11 @@ TOOL_CATEGORIES = {
|
||||
"name": "Tinker / Atropos",
|
||||
"tag": "RL training platform",
|
||||
"env_vars": [
|
||||
{"key": "TINKER_API_KEY", "prompt": "Tinker API key", "url": "https://tinker-console.thinkingmachines.ai/keys"},
|
||||
{
|
||||
"key": "TINKER_API_KEY",
|
||||
"prompt": "Tinker API key",
|
||||
"url": "https://tinker-console.thinkingmachines.ai/keys",
|
||||
},
|
||||
{"key": "WANDB_API_KEY", "prompt": "WandB API key", "url": "https://wandb.ai/authorize"},
|
||||
],
|
||||
"post_setup": "rl_training",
|
||||
@@ -236,24 +254,26 @@ TOOL_CATEGORIES = {
|
||||
# Simple env-var requirements for toolsets NOT in TOOL_CATEGORIES.
|
||||
# Used as a fallback for tools like vision/moa that just need an API key.
|
||||
TOOLSET_ENV_REQUIREMENTS = {
|
||||
"vision": [("OPENROUTER_API_KEY", "https://openrouter.ai/keys")],
|
||||
"moa": [("OPENROUTER_API_KEY", "https://openrouter.ai/keys")],
|
||||
"vision": [("OPENROUTER_API_KEY", "https://openrouter.ai/keys")],
|
||||
"moa": [("OPENROUTER_API_KEY", "https://openrouter.ai/keys")],
|
||||
}
|
||||
|
||||
|
||||
# ─── Post-Setup Hooks ─────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def _run_post_setup(post_setup_key: str):
|
||||
"""Run post-setup hooks for tools that need extra installation steps."""
|
||||
import shutil
|
||||
|
||||
if post_setup_key == "browserbase":
|
||||
node_modules = PROJECT_ROOT / "node_modules" / "agent-browser"
|
||||
if not node_modules.exists() and shutil.which("npm"):
|
||||
_print_info(" Installing Node.js dependencies for browser tools...")
|
||||
import subprocess
|
||||
|
||||
result = subprocess.run(
|
||||
["npm", "install", "--silent"],
|
||||
capture_output=True, text=True, cwd=str(PROJECT_ROOT)
|
||||
["npm", "install", "--silent"], capture_output=True, text=True, cwd=str(PROJECT_ROOT)
|
||||
)
|
||||
if result.returncode == 0:
|
||||
_print_success(" Node.js dependencies installed")
|
||||
@@ -270,16 +290,17 @@ def _run_post_setup(post_setup_key: str):
|
||||
if tinker_dir.exists() and (tinker_dir / "pyproject.toml").exists():
|
||||
_print_info(" Installing tinker-atropos submodule...")
|
||||
import subprocess
|
||||
|
||||
uv_bin = shutil.which("uv")
|
||||
if uv_bin:
|
||||
result = subprocess.run(
|
||||
[uv_bin, "pip", "install", "--python", sys.executable, "-e", str(tinker_dir)],
|
||||
capture_output=True, text=True
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
else:
|
||||
result = subprocess.run(
|
||||
[sys.executable, "-m", "pip", "install", "-e", str(tinker_dir)],
|
||||
capture_output=True, text=True
|
||||
[sys.executable, "-m", "pip", "install", "-e", str(tinker_dir)], capture_output=True, text=True
|
||||
)
|
||||
if result.returncode == 0:
|
||||
_print_success(" tinker-atropos installed")
|
||||
@@ -294,7 +315,8 @@ def _run_post_setup(post_setup_key: str):
|
||||
|
||||
# ─── Platform / Toolset Helpers ───────────────────────────────────────────────
|
||||
|
||||
def _get_enabled_platforms() -> List[str]:
|
||||
|
||||
def _get_enabled_platforms() -> list[str]:
|
||||
"""Return platform keys that are configured (have tokens or are CLI)."""
|
||||
enabled = ["cli"]
|
||||
if get_env_value("TELEGRAM_BOT_TOKEN"):
|
||||
@@ -308,9 +330,9 @@ def _get_enabled_platforms() -> List[str]:
|
||||
return enabled
|
||||
|
||||
|
||||
def _get_platform_tools(config: dict, platform: str) -> Set[str]:
|
||||
def _get_platform_tools(config: dict, platform: str) -> set[str]:
|
||||
"""Resolve which individual toolset names are enabled for a platform."""
|
||||
from toolsets import resolve_toolset, TOOLSETS
|
||||
from toolsets import resolve_toolset
|
||||
|
||||
platform_toolsets = config.get("platform_toolsets", {})
|
||||
toolset_names = platform_toolsets.get(platform)
|
||||
@@ -335,7 +357,7 @@ def _get_platform_tools(config: dict, platform: str) -> Set[str]:
|
||||
return enabled_toolsets
|
||||
|
||||
|
||||
def _save_platform_tools(config: dict, platform: str, enabled_toolset_keys: Set[str]):
|
||||
def _save_platform_tools(config: dict, platform: str, enabled_toolset_keys: set[str]):
|
||||
"""Save the selected toolset keys for a platform to config."""
|
||||
config.setdefault("platform_toolsets", {})
|
||||
config["platform_toolsets"][platform] = sorted(enabled_toolset_keys)
|
||||
@@ -364,6 +386,7 @@ def _toolset_has_keys(ts_key: str) -> bool:
|
||||
|
||||
# ─── Menu Helpers ─────────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def _prompt_choice(question: str, choices: list, default: int = 0) -> int:
|
||||
"""Single-select menu (arrow keys). Uses curses to avoid simple_term_menu
|
||||
rendering bugs in tmux, iTerm, and other non-standard terminals."""
|
||||
@@ -371,6 +394,7 @@ def _prompt_choice(question: str, choices: list, default: int = 0) -> int:
|
||||
# Curses-based single-select — works in tmux, iTerm, and standard terminals
|
||||
try:
|
||||
import curses
|
||||
|
||||
result_holder = [default]
|
||||
|
||||
def _curses_menu(stdscr):
|
||||
@@ -386,8 +410,9 @@ def _prompt_choice(question: str, choices: list, default: int = 0) -> int:
|
||||
stdscr.clear()
|
||||
max_y, max_x = stdscr.getmaxyx()
|
||||
try:
|
||||
stdscr.addnstr(0, 0, question, max_x - 1,
|
||||
curses.A_BOLD | (curses.color_pair(2) if curses.has_colors() else 0))
|
||||
stdscr.addnstr(
|
||||
0, 0, question, max_x - 1, curses.A_BOLD | (curses.color_pair(2) if curses.has_colors() else 0)
|
||||
)
|
||||
except curses.error:
|
||||
pass
|
||||
|
||||
@@ -410,14 +435,14 @@ def _prompt_choice(question: str, choices: list, default: int = 0) -> int:
|
||||
stdscr.refresh()
|
||||
key = stdscr.getch()
|
||||
|
||||
if key in (curses.KEY_UP, ord('k')):
|
||||
if key in (curses.KEY_UP, ord("k")):
|
||||
cursor = (cursor - 1) % len(choices)
|
||||
elif key in (curses.KEY_DOWN, ord('j')):
|
||||
elif key in (curses.KEY_DOWN, ord("j")):
|
||||
cursor = (cursor + 1) % len(choices)
|
||||
elif key in (curses.KEY_ENTER, 10, 13):
|
||||
result_holder[0] = cursor
|
||||
return
|
||||
elif key in (27, ord('q')):
|
||||
elif key in (27, ord("q")):
|
||||
return
|
||||
|
||||
curses.wrapper(_curses_menu)
|
||||
@@ -431,7 +456,7 @@ def _prompt_choice(question: str, choices: list, default: int = 0) -> int:
|
||||
for i, c in enumerate(choices):
|
||||
marker = "●" if i == default else "○"
|
||||
style = Colors.GREEN if i == default else ""
|
||||
print(color(f" {marker} {i+1}. {c}", style) if style else f" {marker} {i+1}. {c}")
|
||||
print(color(f" {marker} {i + 1}. {c}", style) if style else f" {marker} {i + 1}. {c}")
|
||||
while True:
|
||||
try:
|
||||
val = input(color(f" Select [1-{len(choices)}] ({default + 1}): ", Colors.DIM))
|
||||
@@ -445,7 +470,7 @@ def _prompt_choice(question: str, choices: list, default: int = 0) -> int:
|
||||
return default
|
||||
|
||||
|
||||
def _prompt_toolset_checklist(platform_label: str, enabled: Set[str]) -> Set[str]:
|
||||
def _prompt_toolset_checklist(platform_label: str, enabled: set[str]) -> set[str]:
|
||||
"""Multi-select checklist of toolsets. Returns set of selected toolset keys."""
|
||||
|
||||
labels = []
|
||||
@@ -455,15 +480,13 @@ def _prompt_toolset_checklist(platform_label: str, enabled: Set[str]) -> Set[str
|
||||
suffix = " [no API key]"
|
||||
labels.append(f"{ts_label} ({ts_desc}){suffix}")
|
||||
|
||||
pre_selected_indices = [
|
||||
i for i, (ts_key, _, _) in enumerate(CONFIGURABLE_TOOLSETS)
|
||||
if ts_key in enabled
|
||||
]
|
||||
pre_selected_indices = [i for i, (ts_key, _, _) in enumerate(CONFIGURABLE_TOOLSETS) if ts_key in enabled]
|
||||
|
||||
# Curses-based multi-select — arrow keys + space to toggle + enter to confirm.
|
||||
# simple_term_menu has rendering bugs in tmux, iTerm, and other terminals.
|
||||
try:
|
||||
import curses
|
||||
|
||||
selected = set(pre_selected_indices)
|
||||
result_holder = [None]
|
||||
|
||||
@@ -483,7 +506,13 @@ def _prompt_toolset_checklist(platform_label: str, enabled: Set[str]) -> Set[str
|
||||
max_y, max_x = stdscr.getmaxyx()
|
||||
header = f"Tools for {platform_label} — ↑↓ navigate, SPACE toggle, ENTER confirm"
|
||||
try:
|
||||
stdscr.addnstr(0, 0, header, max_x - 1, curses.A_BOLD | curses.color_pair(2) if curses.has_colors() else curses.A_BOLD)
|
||||
stdscr.addnstr(
|
||||
0,
|
||||
0,
|
||||
header,
|
||||
max_x - 1,
|
||||
curses.A_BOLD | curses.color_pair(2) if curses.has_colors() else curses.A_BOLD,
|
||||
)
|
||||
except curses.error:
|
||||
pass
|
||||
|
||||
@@ -514,11 +543,11 @@ def _prompt_toolset_checklist(platform_label: str, enabled: Set[str]) -> Set[str
|
||||
stdscr.refresh()
|
||||
key = stdscr.getch()
|
||||
|
||||
if key in (curses.KEY_UP, ord('k')):
|
||||
if key in (curses.KEY_UP, ord("k")):
|
||||
cursor = (cursor - 1) % len(labels)
|
||||
elif key in (curses.KEY_DOWN, ord('j')):
|
||||
elif key in (curses.KEY_DOWN, ord("j")):
|
||||
cursor = (cursor + 1) % len(labels)
|
||||
elif key == ord(' '):
|
||||
elif key == ord(" "):
|
||||
if cursor in selected:
|
||||
selected.discard(cursor)
|
||||
else:
|
||||
@@ -526,7 +555,7 @@ def _prompt_toolset_checklist(platform_label: str, enabled: Set[str]) -> Set[str
|
||||
elif key in (curses.KEY_ENTER, 10, 13):
|
||||
result_holder[0] = {CONFIGURABLE_TOOLSETS[i][0] for i in selected}
|
||||
return
|
||||
elif key in (27, ord('q')): # ESC or q
|
||||
elif key in (27, ord("q")): # ESC or q
|
||||
result_holder[0] = enabled
|
||||
return
|
||||
|
||||
@@ -565,9 +594,10 @@ def _prompt_toolset_checklist(platform_label: str, enabled: Set[str]) -> Set[str
|
||||
|
||||
# ─── Provider-Aware Configuration ────────────────────────────────────────────
|
||||
|
||||
|
||||
def _configure_toolset(ts_key: str, config: dict):
|
||||
"""Configure a toolset - provider selection + API keys.
|
||||
|
||||
|
||||
Uses TOOL_CATEGORIES for provider-aware config, falls back to simple
|
||||
env var prompts for toolsets not in TOOL_CATEGORIES.
|
||||
"""
|
||||
@@ -591,7 +621,9 @@ def _configure_tool_category(ts_key: str, cat: dict, config: dict):
|
||||
req = cat["requires_python"]
|
||||
if sys.version_info < req:
|
||||
print()
|
||||
_print_error(f" {name} requires Python {req[0]}.{req[1]}+ (current: {sys.version_info.major}.{sys.version_info.minor})")
|
||||
_print_error(
|
||||
f" {name} requires Python {req[0]}.{req[1]}+ (current: {sys.version_info.major}.{sys.version_info.minor})"
|
||||
)
|
||||
_print_info(" Upgrade Python and reinstall to enable this tool.")
|
||||
return
|
||||
|
||||
@@ -610,7 +642,7 @@ def _configure_tool_category(ts_key: str, cat: dict, config: dict):
|
||||
# Multiple providers - let user choose
|
||||
print()
|
||||
# Use custom title if provided (e.g. "Select Search Provider")
|
||||
title = cat.get("setup_title", f"Choose a provider")
|
||||
title = cat.get("setup_title", "Choose a provider")
|
||||
print(color(f" --- {icon} {name} - {title} ---", Colors.CYAN))
|
||||
if cat.get("setup_note"):
|
||||
_print_info(f" {cat['setup_note']}")
|
||||
@@ -626,7 +658,11 @@ def _configure_tool_category(ts_key: str, cat: dict, config: dict):
|
||||
if p.get("tts_provider") and config.get("tts", {}).get("provider") == p["tts_provider"]:
|
||||
configured = " [active]"
|
||||
elif not env_vars:
|
||||
configured = " [active]" if config.get("tts", {}).get("provider", "edge") == p.get("tts_provider", "") else ""
|
||||
configured = (
|
||||
" [active]"
|
||||
if config.get("tts", {}).get("provider", "edge") == p.get("tts_provider", "")
|
||||
else ""
|
||||
)
|
||||
else:
|
||||
configured = " [configured]"
|
||||
provider_choices.append(f"{p['name']}{tag}{configured}")
|
||||
@@ -688,9 +724,9 @@ def _configure_provider(provider: dict, config: dict):
|
||||
|
||||
if value:
|
||||
save_env_value(var["key"], value)
|
||||
_print_success(f" Saved")
|
||||
_print_success(" Saved")
|
||||
else:
|
||||
_print_warning(f" Skipped")
|
||||
_print_warning(" Skipped")
|
||||
all_configured = False
|
||||
|
||||
# Run post-setup hooks if needed
|
||||
@@ -721,9 +757,9 @@ def _configure_simple_requirements(ts_key: str):
|
||||
value = _prompt(f" {var}", password=True)
|
||||
if value and value.strip():
|
||||
save_env_value(var, value.strip())
|
||||
_print_success(f" Saved")
|
||||
_print_success(" Saved")
|
||||
else:
|
||||
_print_warning(f" Skipped")
|
||||
_print_warning(" Skipped")
|
||||
|
||||
|
||||
def _reconfigure_tool(config: dict):
|
||||
@@ -827,9 +863,9 @@ def _reconfigure_provider(provider: dict, config: dict):
|
||||
value = _prompt(f" {var.get('prompt', var['key'])} (Enter to keep current)", password=not default_val)
|
||||
if value and value.strip():
|
||||
save_env_value(var["key"], value.strip())
|
||||
_print_success(f" Updated")
|
||||
_print_success(" Updated")
|
||||
else:
|
||||
_print_info(f" Kept current")
|
||||
_print_info(" Kept current")
|
||||
|
||||
|
||||
def _reconfigure_simple_requirements(ts_key: str):
|
||||
@@ -851,13 +887,14 @@ def _reconfigure_simple_requirements(ts_key: str):
|
||||
value = _prompt(f" {var} (Enter to keep current)", password=True)
|
||||
if value and value.strip():
|
||||
save_env_value(var, value.strip())
|
||||
_print_success(f" Updated")
|
||||
_print_success(" Updated")
|
||||
else:
|
||||
_print_info(f" Kept current")
|
||||
_print_info(" Kept current")
|
||||
|
||||
|
||||
# ─── Main Entry Point ─────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def tools_command(args=None, first_install: bool = False, config: dict = None):
|
||||
"""Entry point for `hermes tools` and `hermes setup tools`.
|
||||
|
||||
@@ -907,7 +944,8 @@ def tools_command(args=None, first_install: bool = False, config: dict = None):
|
||||
# TTS (Edge vs OpenAI vs ElevenLabs), etc. are shown even when
|
||||
# a free provider exists.
|
||||
to_configure = [
|
||||
ts_key for ts_key in sorted(new_enabled)
|
||||
ts_key
|
||||
for ts_key in sorted(new_enabled)
|
||||
if TOOL_CATEGORIES.get(ts_key) or TOOLSET_ENV_REQUIREMENTS.get(ts_key)
|
||||
]
|
||||
|
||||
@@ -981,7 +1019,7 @@ def tools_command(args=None, first_install: bool = False, config: dict = None):
|
||||
|
||||
# Configure newly enabled toolsets that need API keys
|
||||
for ts_key in sorted(added):
|
||||
if (TOOL_CATEGORIES.get(ts_key) or TOOLSET_ENV_REQUIREMENTS.get(ts_key)):
|
||||
if TOOL_CATEGORIES.get(ts_key) or TOOLSET_ENV_REQUIREMENTS.get(ts_key):
|
||||
if not _toolset_has_keys(ts_key):
|
||||
_configure_toolset(ts_key, config)
|
||||
|
||||
|
||||
@@ -7,23 +7,25 @@ Provides options for:
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import shutil
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from hermes_cli.colors import Colors, color
|
||||
|
||||
|
||||
def log_info(msg: str):
|
||||
print(f"{color('→', Colors.CYAN)} {msg}")
|
||||
|
||||
|
||||
def log_success(msg: str):
|
||||
print(f"{color('✓', Colors.GREEN)} {msg}")
|
||||
|
||||
|
||||
def log_warn(msg: str):
|
||||
print(f"{color('⚠', Colors.YELLOW)} {msg}")
|
||||
|
||||
|
||||
def log_error(msg: str):
|
||||
print(f"{color('✗', Colors.RED)} {msg}")
|
||||
|
||||
@@ -42,7 +44,7 @@ def find_shell_configs() -> list:
|
||||
"""Find shell configuration files that might have PATH entries."""
|
||||
home = Path.home()
|
||||
configs = []
|
||||
|
||||
|
||||
candidates = [
|
||||
home / ".bashrc",
|
||||
home / ".bash_profile",
|
||||
@@ -50,11 +52,11 @@ def find_shell_configs() -> list:
|
||||
home / ".zshrc",
|
||||
home / ".zprofile",
|
||||
]
|
||||
|
||||
|
||||
for config in candidates:
|
||||
if config.exists():
|
||||
configs.append(config)
|
||||
|
||||
|
||||
return configs
|
||||
|
||||
|
||||
@@ -62,45 +64,45 @@ def remove_path_from_shell_configs():
|
||||
"""Remove Hermes PATH entries from shell configuration files."""
|
||||
configs = find_shell_configs()
|
||||
removed_from = []
|
||||
|
||||
|
||||
for config_path in configs:
|
||||
try:
|
||||
content = config_path.read_text()
|
||||
original_content = content
|
||||
|
||||
|
||||
# Remove lines containing hermes-agent or hermes PATH entries
|
||||
new_lines = []
|
||||
skip_next = False
|
||||
|
||||
for line in content.split('\n'):
|
||||
|
||||
for line in content.split("\n"):
|
||||
# Skip the "# Hermes Agent" comment and following line
|
||||
if '# Hermes Agent' in line or '# hermes-agent' in line:
|
||||
if "# Hermes Agent" in line or "# hermes-agent" in line:
|
||||
skip_next = True
|
||||
continue
|
||||
if skip_next and ('hermes' in line.lower() and 'PATH' in line):
|
||||
if skip_next and ("hermes" in line.lower() and "PATH" in line):
|
||||
skip_next = False
|
||||
continue
|
||||
skip_next = False
|
||||
|
||||
|
||||
# Remove any PATH line containing hermes
|
||||
if 'hermes' in line.lower() and ('PATH=' in line or 'path=' in line.lower()):
|
||||
if "hermes" in line.lower() and ("PATH=" in line or "path=" in line.lower()):
|
||||
continue
|
||||
|
||||
|
||||
new_lines.append(line)
|
||||
|
||||
new_content = '\n'.join(new_lines)
|
||||
|
||||
|
||||
new_content = "\n".join(new_lines)
|
||||
|
||||
# Clean up multiple blank lines
|
||||
while '\n\n\n' in new_content:
|
||||
new_content = new_content.replace('\n\n\n', '\n\n')
|
||||
|
||||
while "\n\n\n" in new_content:
|
||||
new_content = new_content.replace("\n\n\n", "\n\n")
|
||||
|
||||
if new_content != original_content:
|
||||
config_path.write_text(new_content)
|
||||
removed_from.append(config_path)
|
||||
|
||||
|
||||
except Exception as e:
|
||||
log_warn(f"Could not update {config_path}: {e}")
|
||||
|
||||
|
||||
return removed_from
|
||||
|
||||
|
||||
@@ -110,61 +112,49 @@ def remove_wrapper_script():
|
||||
Path.home() / ".local" / "bin" / "hermes",
|
||||
Path("/usr/local/bin/hermes"),
|
||||
]
|
||||
|
||||
|
||||
removed = []
|
||||
for wrapper in wrapper_paths:
|
||||
if wrapper.exists():
|
||||
try:
|
||||
# Check if it's our wrapper (contains hermes_cli reference)
|
||||
content = wrapper.read_text()
|
||||
if 'hermes_cli' in content or 'hermes-agent' in content:
|
||||
if "hermes_cli" in content or "hermes-agent" in content:
|
||||
wrapper.unlink()
|
||||
removed.append(wrapper)
|
||||
except Exception as e:
|
||||
log_warn(f"Could not remove {wrapper}: {e}")
|
||||
|
||||
|
||||
return removed
|
||||
|
||||
|
||||
def uninstall_gateway_service():
|
||||
"""Stop and uninstall the gateway service if running."""
|
||||
import platform
|
||||
|
||||
|
||||
if platform.system() != "Linux":
|
||||
return False
|
||||
|
||||
|
||||
service_file = Path.home() / ".config" / "systemd" / "user" / "hermes-gateway.service"
|
||||
|
||||
|
||||
if not service_file.exists():
|
||||
return False
|
||||
|
||||
|
||||
try:
|
||||
# Stop the service
|
||||
subprocess.run(
|
||||
["systemctl", "--user", "stop", "hermes-gateway"],
|
||||
capture_output=True,
|
||||
check=False
|
||||
)
|
||||
|
||||
subprocess.run(["systemctl", "--user", "stop", "hermes-gateway"], capture_output=True, check=False)
|
||||
|
||||
# Disable the service
|
||||
subprocess.run(
|
||||
["systemctl", "--user", "disable", "hermes-gateway"],
|
||||
capture_output=True,
|
||||
check=False
|
||||
)
|
||||
|
||||
subprocess.run(["systemctl", "--user", "disable", "hermes-gateway"], capture_output=True, check=False)
|
||||
|
||||
# Remove service file
|
||||
service_file.unlink()
|
||||
|
||||
|
||||
# Reload systemd
|
||||
subprocess.run(
|
||||
["systemctl", "--user", "daemon-reload"],
|
||||
capture_output=True,
|
||||
check=False
|
||||
)
|
||||
|
||||
subprocess.run(["systemctl", "--user", "daemon-reload"], capture_output=True, check=False)
|
||||
|
||||
return True
|
||||
|
||||
|
||||
except Exception as e:
|
||||
log_warn(f"Could not fully remove gateway service: {e}")
|
||||
return False
|
||||
@@ -173,20 +163,20 @@ def uninstall_gateway_service():
|
||||
def run_uninstall(args):
|
||||
"""
|
||||
Run the uninstall process.
|
||||
|
||||
|
||||
Options:
|
||||
- Full uninstall: removes code + ~/.hermes/ (configs, data, logs)
|
||||
- Keep data: removes code but keeps ~/.hermes/ for future reinstall
|
||||
"""
|
||||
project_root = get_project_root()
|
||||
hermes_home = get_hermes_home()
|
||||
|
||||
|
||||
print()
|
||||
print(color("┌─────────────────────────────────────────────────────────┐", Colors.MAGENTA, Colors.BOLD))
|
||||
print(color("│ ⚕ Hermes Agent Uninstaller │", Colors.MAGENTA, Colors.BOLD))
|
||||
print(color("└─────────────────────────────────────────────────────────┘", Colors.MAGENTA, Colors.BOLD))
|
||||
print()
|
||||
|
||||
|
||||
# Show what will be affected
|
||||
print(color("Current Installation:", Colors.CYAN, Colors.BOLD))
|
||||
print(f" Code: {project_root}")
|
||||
@@ -194,7 +184,7 @@ def run_uninstall(args):
|
||||
print(f" Secrets: {hermes_home / '.env'}")
|
||||
print(f" Data: {hermes_home / 'cron/'}, {hermes_home / 'sessions/'}, {hermes_home / 'logs/'}")
|
||||
print()
|
||||
|
||||
|
||||
# Ask for confirmation
|
||||
print(color("Uninstall Options:", Colors.YELLOW, Colors.BOLD))
|
||||
print()
|
||||
@@ -206,21 +196,21 @@ def run_uninstall(args):
|
||||
print()
|
||||
print(" 3) " + color("Cancel", Colors.CYAN) + " - Don't uninstall")
|
||||
print()
|
||||
|
||||
|
||||
try:
|
||||
choice = input(color("Select option [1/2/3]: ", Colors.BOLD)).strip()
|
||||
except (KeyboardInterrupt, EOFError):
|
||||
print()
|
||||
print("Cancelled.")
|
||||
return
|
||||
|
||||
|
||||
if choice == "3" or choice.lower() in ("c", "cancel", "q", "quit", "n", "no"):
|
||||
print()
|
||||
print("Uninstall cancelled.")
|
||||
return
|
||||
|
||||
full_uninstall = (choice == "2")
|
||||
|
||||
|
||||
full_uninstall = choice == "2"
|
||||
|
||||
# Final confirmation
|
||||
print()
|
||||
if full_uninstall:
|
||||
@@ -228,7 +218,7 @@ def run_uninstall(args):
|
||||
print(color(" Including: configs, API keys, sessions, scheduled jobs, logs", Colors.RED))
|
||||
else:
|
||||
print("This will remove the Hermes code but keep your configuration and data.")
|
||||
|
||||
|
||||
print()
|
||||
try:
|
||||
confirm = input(f"Type '{color('yes', Colors.YELLOW)}' to confirm: ").strip().lower()
|
||||
@@ -236,23 +226,23 @@ def run_uninstall(args):
|
||||
print()
|
||||
print("Cancelled.")
|
||||
return
|
||||
|
||||
|
||||
if confirm != "yes":
|
||||
print()
|
||||
print("Uninstall cancelled.")
|
||||
return
|
||||
|
||||
|
||||
print()
|
||||
print(color("Uninstalling...", Colors.CYAN, Colors.BOLD))
|
||||
print()
|
||||
|
||||
|
||||
# 1. Stop and uninstall gateway service
|
||||
log_info("Checking for gateway service...")
|
||||
if uninstall_gateway_service():
|
||||
log_success("Gateway service stopped and removed")
|
||||
else:
|
||||
log_info("No gateway service found")
|
||||
|
||||
|
||||
# 2. Remove PATH entries from shell configs
|
||||
log_info("Removing PATH entries from shell configs...")
|
||||
removed_configs = remove_path_from_shell_configs()
|
||||
@@ -261,7 +251,7 @@ def run_uninstall(args):
|
||||
log_success(f"Updated {config}")
|
||||
else:
|
||||
log_info("No PATH entries found to remove")
|
||||
|
||||
|
||||
# 3. Remove wrapper script
|
||||
log_info("Removing hermes command...")
|
||||
removed_wrappers = remove_wrapper_script()
|
||||
@@ -270,10 +260,10 @@ def run_uninstall(args):
|
||||
log_success(f"Removed {wrapper}")
|
||||
else:
|
||||
log_info("No wrapper script found")
|
||||
|
||||
|
||||
# 4. Remove installation directory (code)
|
||||
log_info(f"Removing installation directory...")
|
||||
|
||||
log_info("Removing installation directory...")
|
||||
|
||||
# Check if we're running from within the install dir
|
||||
# We need to be careful here
|
||||
try:
|
||||
@@ -289,7 +279,7 @@ def run_uninstall(args):
|
||||
except Exception as e:
|
||||
log_warn(f"Could not fully remove {project_root}: {e}")
|
||||
log_info("You may need to manually remove it")
|
||||
|
||||
|
||||
# 5. Optionally remove ~/.hermes/ data directory
|
||||
if full_uninstall:
|
||||
log_info("Removing configuration and data...")
|
||||
@@ -302,22 +292,27 @@ def run_uninstall(args):
|
||||
log_info("You may need to manually remove it")
|
||||
else:
|
||||
log_info(f"Keeping configuration and data in {hermes_home}")
|
||||
|
||||
|
||||
# Done
|
||||
print()
|
||||
print(color("┌─────────────────────────────────────────────────────────┐", Colors.GREEN, Colors.BOLD))
|
||||
print(color("│ ✓ Uninstall Complete! │", Colors.GREEN, Colors.BOLD))
|
||||
print(color("└─────────────────────────────────────────────────────────┘", Colors.GREEN, Colors.BOLD))
|
||||
print()
|
||||
|
||||
|
||||
if not full_uninstall:
|
||||
print(color("Your configuration and data have been preserved:", Colors.CYAN))
|
||||
print(f" {hermes_home}/")
|
||||
print()
|
||||
print("To reinstall later with your existing settings:")
|
||||
print(color(" curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.sh | bash", Colors.DIM))
|
||||
print(
|
||||
color(
|
||||
" curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.sh | bash",
|
||||
Colors.DIM,
|
||||
)
|
||||
)
|
||||
print()
|
||||
|
||||
|
||||
print(color("Reload your shell to complete the process:", Colors.YELLOW))
|
||||
print(" source ~/.bashrc # or ~/.zshrc")
|
||||
print()
|
||||
|
||||
@@ -19,8 +19,7 @@ import os
|
||||
import sqlite3
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, List, Optional
|
||||
|
||||
from typing import Any
|
||||
|
||||
DEFAULT_DB_PATH = Path(os.getenv("HERMES_HOME", Path.home() / ".hermes")) / "state.db"
|
||||
|
||||
@@ -156,8 +155,7 @@ class SessionDB:
|
||||
# since the title column is guaranteed to exist at this point)
|
||||
try:
|
||||
cursor.execute(
|
||||
"CREATE UNIQUE INDEX IF NOT EXISTS idx_sessions_title_unique "
|
||||
"ON sessions(title) WHERE title IS NOT NULL"
|
||||
"CREATE UNIQUE INDEX IF NOT EXISTS idx_sessions_title_unique ON sessions(title) WHERE title IS NOT NULL"
|
||||
)
|
||||
except sqlite3.OperationalError:
|
||||
pass # Index already exists
|
||||
@@ -185,7 +183,7 @@ class SessionDB:
|
||||
session_id: str,
|
||||
source: str,
|
||||
model: str = None,
|
||||
model_config: Dict[str, Any] = None,
|
||||
model_config: dict[str, Any] = None,
|
||||
system_prompt: str = None,
|
||||
user_id: str = None,
|
||||
parent_session_id: str = None,
|
||||
@@ -225,9 +223,7 @@ class SessionDB:
|
||||
)
|
||||
self._conn.commit()
|
||||
|
||||
def update_token_counts(
|
||||
self, session_id: str, input_tokens: int = 0, output_tokens: int = 0
|
||||
) -> None:
|
||||
def update_token_counts(self, session_id: str, input_tokens: int = 0, output_tokens: int = 0) -> None:
|
||||
"""Increment token counters on a session."""
|
||||
self._conn.execute(
|
||||
"""UPDATE sessions SET
|
||||
@@ -238,11 +234,9 @@ class SessionDB:
|
||||
)
|
||||
self._conn.commit()
|
||||
|
||||
def get_session(self, session_id: str) -> Optional[Dict[str, Any]]:
|
||||
def get_session(self, session_id: str) -> dict[str, Any] | None:
|
||||
"""Get a session by ID."""
|
||||
cursor = self._conn.execute(
|
||||
"SELECT * FROM sessions WHERE id = ?", (session_id,)
|
||||
)
|
||||
cursor = self._conn.execute("SELECT * FROM sessions WHERE id = ?", (session_id,))
|
||||
row = cursor.fetchone()
|
||||
return dict(row) if row else None
|
||||
|
||||
@@ -250,7 +244,7 @@ class SessionDB:
|
||||
MAX_TITLE_LENGTH = 100
|
||||
|
||||
@staticmethod
|
||||
def sanitize_title(title: Optional[str]) -> Optional[str]:
|
||||
def sanitize_title(title: str | None) -> str | None:
|
||||
"""Validate and sanitize a session title.
|
||||
|
||||
- Strips leading/trailing whitespace
|
||||
@@ -271,27 +265,26 @@ class SessionDB:
|
||||
# Remove ASCII control characters (0x00-0x1F, 0x7F) but keep
|
||||
# whitespace chars (\t=0x09, \n=0x0A, \r=0x0D) so they can be
|
||||
# normalized to spaces by the whitespace collapsing step below
|
||||
cleaned = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f]', '', title)
|
||||
cleaned = re.sub(r"[\x00-\x08\x0b\x0c\x0e-\x1f\x7f]", "", title)
|
||||
|
||||
# Remove problematic Unicode control characters:
|
||||
# - Zero-width chars (U+200B-U+200F, U+FEFF)
|
||||
# - Directional overrides (U+202A-U+202E, U+2066-U+2069)
|
||||
# - Object replacement (U+FFFC), interlinear annotation (U+FFF9-U+FFFB)
|
||||
cleaned = re.sub(
|
||||
r'[\u200b-\u200f\u2028-\u202e\u2060-\u2069\ufeff\ufffc\ufff9-\ufffb]',
|
||||
'', cleaned,
|
||||
r"[\u200b-\u200f\u2028-\u202e\u2060-\u2069\ufeff\ufffc\ufff9-\ufffb]",
|
||||
"",
|
||||
cleaned,
|
||||
)
|
||||
|
||||
# Collapse internal whitespace runs and strip
|
||||
cleaned = re.sub(r'\s+', ' ', cleaned).strip()
|
||||
cleaned = re.sub(r"\s+", " ", cleaned).strip()
|
||||
|
||||
if not cleaned:
|
||||
return None
|
||||
|
||||
if len(cleaned) > SessionDB.MAX_TITLE_LENGTH:
|
||||
raise ValueError(
|
||||
f"Title too long ({len(cleaned)} chars, max {SessionDB.MAX_TITLE_LENGTH})"
|
||||
)
|
||||
raise ValueError(f"Title too long ({len(cleaned)} chars, max {SessionDB.MAX_TITLE_LENGTH})")
|
||||
|
||||
return cleaned
|
||||
|
||||
@@ -312,9 +305,7 @@ class SessionDB:
|
||||
)
|
||||
conflict = cursor.fetchone()
|
||||
if conflict:
|
||||
raise ValueError(
|
||||
f"Title '{title}' is already in use by session {conflict['id']}"
|
||||
)
|
||||
raise ValueError(f"Title '{title}' is already in use by session {conflict['id']}")
|
||||
cursor = self._conn.execute(
|
||||
"UPDATE sessions SET title = ? WHERE id = ?",
|
||||
(title, session_id),
|
||||
@@ -322,23 +313,19 @@ class SessionDB:
|
||||
self._conn.commit()
|
||||
return cursor.rowcount > 0
|
||||
|
||||
def get_session_title(self, session_id: str) -> Optional[str]:
|
||||
def get_session_title(self, session_id: str) -> str | None:
|
||||
"""Get the title for a session, or None."""
|
||||
cursor = self._conn.execute(
|
||||
"SELECT title FROM sessions WHERE id = ?", (session_id,)
|
||||
)
|
||||
cursor = self._conn.execute("SELECT title FROM sessions WHERE id = ?", (session_id,))
|
||||
row = cursor.fetchone()
|
||||
return row["title"] if row else None
|
||||
|
||||
def get_session_by_title(self, title: str) -> Optional[Dict[str, Any]]:
|
||||
def get_session_by_title(self, title: str) -> dict[str, Any] | None:
|
||||
"""Look up a session by exact title. Returns session dict or None."""
|
||||
cursor = self._conn.execute(
|
||||
"SELECT * FROM sessions WHERE title = ?", (title,)
|
||||
)
|
||||
cursor = self._conn.execute("SELECT * FROM sessions WHERE title = ?", (title,))
|
||||
row = cursor.fetchone()
|
||||
return dict(row) if row else None
|
||||
|
||||
def resolve_session_by_title(self, title: str) -> Optional[str]:
|
||||
def resolve_session_by_title(self, title: str) -> str | None:
|
||||
"""Resolve a title to a session ID, preferring the latest in a lineage.
|
||||
|
||||
If the exact title exists, returns that session's ID.
|
||||
@@ -353,8 +340,7 @@ class SessionDB:
|
||||
# Escape SQL LIKE wildcards (%, _) in the title to prevent false matches
|
||||
escaped = title.replace("\\", "\\\\").replace("%", "\\%").replace("_", "\\_")
|
||||
cursor = self._conn.execute(
|
||||
"SELECT id, title, started_at FROM sessions "
|
||||
"WHERE title LIKE ? ESCAPE '\\' ORDER BY started_at DESC",
|
||||
"SELECT id, title, started_at FROM sessions WHERE title LIKE ? ESCAPE '\\' ORDER BY started_at DESC",
|
||||
(f"{escaped} #%",),
|
||||
)
|
||||
numbered = cursor.fetchall()
|
||||
@@ -373,8 +359,9 @@ class SessionDB:
|
||||
the highest existing number and increments.
|
||||
"""
|
||||
import re
|
||||
|
||||
# Strip existing #N suffix to find the true base
|
||||
match = re.match(r'^(.*?) #(\d+)$', base_title)
|
||||
match = re.match(r"^(.*?) #(\d+)$", base_title)
|
||||
if match:
|
||||
base = match.group(1)
|
||||
else:
|
||||
@@ -395,7 +382,7 @@ class SessionDB:
|
||||
# Find the highest number
|
||||
max_num = 1 # The unnumbered original counts as #1
|
||||
for t in existing:
|
||||
m = re.match(r'^.* #(\d+)$', t)
|
||||
m = re.match(r"^.* #(\d+)$", t)
|
||||
if m:
|
||||
max_num = max(max_num, int(m.group(1)))
|
||||
|
||||
@@ -406,7 +393,7 @@ class SessionDB:
|
||||
source: str = None,
|
||||
limit: int = 20,
|
||||
offset: int = 0,
|
||||
) -> List[Dict[str, Any]]:
|
||||
) -> list[dict[str, Any]]:
|
||||
"""List sessions with preview (first user message) and last active timestamp.
|
||||
|
||||
Returns dicts with keys: id, source, model, title, started_at, ended_at,
|
||||
@@ -506,7 +493,7 @@ class SessionDB:
|
||||
self._conn.commit()
|
||||
return msg_id
|
||||
|
||||
def get_messages(self, session_id: str) -> List[Dict[str, Any]]:
|
||||
def get_messages(self, session_id: str) -> list[dict[str, Any]]:
|
||||
"""Load all messages for a session, ordered by timestamp."""
|
||||
cursor = self._conn.execute(
|
||||
"SELECT * FROM messages WHERE session_id = ? ORDER BY timestamp, id",
|
||||
@@ -524,7 +511,7 @@ class SessionDB:
|
||||
result.append(msg)
|
||||
return result
|
||||
|
||||
def get_messages_as_conversation(self, session_id: str) -> List[Dict[str, Any]]:
|
||||
def get_messages_as_conversation(self, session_id: str) -> list[dict[str, Any]]:
|
||||
"""
|
||||
Load messages in the OpenAI conversation format (role + content dicts).
|
||||
Used by the gateway to restore conversation history.
|
||||
@@ -556,11 +543,11 @@ class SessionDB:
|
||||
def search_messages(
|
||||
self,
|
||||
query: str,
|
||||
source_filter: List[str] = None,
|
||||
role_filter: List[str] = None,
|
||||
source_filter: list[str] = None,
|
||||
role_filter: list[str] = None,
|
||||
limit: int = 20,
|
||||
offset: int = 0,
|
||||
) -> List[Dict[str, Any]]:
|
||||
) -> list[dict[str, Any]]:
|
||||
"""
|
||||
Full-text search across session messages using FTS5.
|
||||
|
||||
@@ -628,8 +615,7 @@ class SessionDB:
|
||||
(match["session_id"], match["id"], match["id"]),
|
||||
)
|
||||
context_msgs = [
|
||||
{"role": r["role"], "content": (r["content"] or "")[:200]}
|
||||
for r in ctx_cursor.fetchall()
|
||||
{"role": r["role"], "content": (r["content"] or "")[:200]} for r in ctx_cursor.fetchall()
|
||||
]
|
||||
match["context"] = context_msgs
|
||||
except Exception:
|
||||
@@ -645,7 +631,7 @@ class SessionDB:
|
||||
source: str = None,
|
||||
limit: int = 20,
|
||||
offset: int = 0,
|
||||
) -> List[Dict[str, Any]]:
|
||||
) -> list[dict[str, Any]]:
|
||||
"""List sessions, optionally filtered by source."""
|
||||
if source:
|
||||
cursor = self._conn.execute(
|
||||
@@ -666,9 +652,7 @@ class SessionDB:
|
||||
def session_count(self, source: str = None) -> int:
|
||||
"""Count sessions, optionally filtered by source."""
|
||||
if source:
|
||||
cursor = self._conn.execute(
|
||||
"SELECT COUNT(*) FROM sessions WHERE source = ?", (source,)
|
||||
)
|
||||
cursor = self._conn.execute("SELECT COUNT(*) FROM sessions WHERE source = ?", (source,))
|
||||
else:
|
||||
cursor = self._conn.execute("SELECT COUNT(*) FROM sessions")
|
||||
return cursor.fetchone()[0]
|
||||
@@ -676,9 +660,7 @@ class SessionDB:
|
||||
def message_count(self, session_id: str = None) -> int:
|
||||
"""Count messages, optionally for a specific session."""
|
||||
if session_id:
|
||||
cursor = self._conn.execute(
|
||||
"SELECT COUNT(*) FROM messages WHERE session_id = ?", (session_id,)
|
||||
)
|
||||
cursor = self._conn.execute("SELECT COUNT(*) FROM messages WHERE session_id = ?", (session_id,))
|
||||
else:
|
||||
cursor = self._conn.execute("SELECT COUNT(*) FROM messages")
|
||||
return cursor.fetchone()[0]
|
||||
@@ -687,7 +669,7 @@ class SessionDB:
|
||||
# Export and cleanup
|
||||
# =========================================================================
|
||||
|
||||
def export_session(self, session_id: str) -> Optional[Dict[str, Any]]:
|
||||
def export_session(self, session_id: str) -> dict[str, Any] | None:
|
||||
"""Export a single session with all its messages as a dict."""
|
||||
session = self.get_session(session_id)
|
||||
if not session:
|
||||
@@ -695,7 +677,7 @@ class SessionDB:
|
||||
messages = self.get_messages(session_id)
|
||||
return {**session, "messages": messages}
|
||||
|
||||
def export_all(self, source: str = None) -> List[Dict[str, Any]]:
|
||||
def export_all(self, source: str = None) -> list[dict[str, Any]]:
|
||||
"""
|
||||
Export all sessions (with messages) as a list of dicts.
|
||||
Suitable for writing to a JSONL file for backup/analysis.
|
||||
@@ -709,9 +691,7 @@ class SessionDB:
|
||||
|
||||
def clear_messages(self, session_id: str) -> None:
|
||||
"""Delete all messages for a session and reset its counters."""
|
||||
self._conn.execute(
|
||||
"DELETE FROM messages WHERE session_id = ?", (session_id,)
|
||||
)
|
||||
self._conn.execute("DELETE FROM messages WHERE session_id = ?", (session_id,))
|
||||
self._conn.execute(
|
||||
"UPDATE sessions SET message_count = 0, tool_call_count = 0 WHERE id = ?",
|
||||
(session_id,),
|
||||
@@ -720,9 +700,7 @@ class SessionDB:
|
||||
|
||||
def delete_session(self, session_id: str) -> bool:
|
||||
"""Delete a session and all its messages. Returns True if found."""
|
||||
cursor = self._conn.execute(
|
||||
"SELECT COUNT(*) FROM sessions WHERE id = ?", (session_id,)
|
||||
)
|
||||
cursor = self._conn.execute("SELECT COUNT(*) FROM sessions WHERE id = ?", (session_id,))
|
||||
if cursor.fetchone()[0] == 0:
|
||||
return False
|
||||
self._conn.execute("DELETE FROM messages WHERE session_id = ?", (session_id,))
|
||||
@@ -736,6 +714,7 @@ class SessionDB:
|
||||
Only prunes ended sessions (not active ones).
|
||||
"""
|
||||
import time as _time
|
||||
|
||||
cutoff = _time.time() - (older_than_days * 86400)
|
||||
|
||||
if source:
|
||||
|
||||
@@ -20,11 +20,10 @@ Public API (signatures preserved from the original 2,400-line version):
|
||||
check_tool_availability(quiet) -> tuple
|
||||
"""
|
||||
|
||||
import json
|
||||
import asyncio
|
||||
import os
|
||||
import json
|
||||
import logging
|
||||
from typing import Dict, Any, List, Optional, Tuple
|
||||
from typing import Any
|
||||
|
||||
from tools.registry import registry
|
||||
from toolsets import resolve_toolset, validate_toolset
|
||||
@@ -36,6 +35,7 @@ logger = logging.getLogger(__name__)
|
||||
# Async Bridging (single source of truth -- used by registry.dispatch too)
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def _run_async(coro):
|
||||
"""Run an async coroutine from a sync context.
|
||||
|
||||
@@ -56,6 +56,7 @@ def _run_async(coro):
|
||||
|
||||
if loop and loop.is_running():
|
||||
import concurrent.futures
|
||||
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
|
||||
future = pool.submit(asyncio.run, coro)
|
||||
return future.result(timeout=300)
|
||||
@@ -66,6 +67,7 @@ def _run_async(coro):
|
||||
# Tool Discovery (importing each module triggers its registry.register calls)
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def _discover_tools():
|
||||
"""Import all tool modules to trigger their registry.register() calls.
|
||||
|
||||
@@ -97,6 +99,7 @@ def _discover_tools():
|
||||
"tools.homeassistant_tool",
|
||||
]
|
||||
import importlib
|
||||
|
||||
for mod_name in _modules:
|
||||
try:
|
||||
importlib.import_module(mod_name)
|
||||
@@ -109,6 +112,7 @@ _discover_tools()
|
||||
# MCP tool discovery (external MCP servers from config)
|
||||
try:
|
||||
from tools.mcp_tool import discover_mcp_tools
|
||||
|
||||
discover_mcp_tools()
|
||||
except Exception as e:
|
||||
logger.debug("MCP tool discovery failed: %s", e)
|
||||
@@ -118,13 +122,13 @@ except Exception as e:
|
||||
# Backward-compat constants (built once after discovery)
|
||||
# =============================================================================
|
||||
|
||||
TOOL_TO_TOOLSET_MAP: Dict[str, str] = registry.get_tool_to_toolset_map()
|
||||
TOOL_TO_TOOLSET_MAP: dict[str, str] = registry.get_tool_to_toolset_map()
|
||||
|
||||
TOOLSET_REQUIREMENTS: Dict[str, dict] = registry.get_toolset_requirements()
|
||||
TOOLSET_REQUIREMENTS: dict[str, dict] = registry.get_toolset_requirements()
|
||||
|
||||
# Resolved tool names from the last get_tool_definitions() call.
|
||||
# Used by code_execution_tool to know which tools are available in this session.
|
||||
_last_resolved_tool_names: List[str] = []
|
||||
_last_resolved_tool_names: list[str] = []
|
||||
|
||||
|
||||
# =============================================================================
|
||||
@@ -139,18 +143,29 @@ _LEGACY_TOOLSET_MAP = {
|
||||
"image_tools": ["image_generate"],
|
||||
"skills_tools": ["skills_list", "skill_view", "skill_manage"],
|
||||
"browser_tools": [
|
||||
"browser_navigate", "browser_snapshot", "browser_click",
|
||||
"browser_type", "browser_scroll", "browser_back",
|
||||
"browser_press", "browser_close", "browser_get_images",
|
||||
"browser_vision"
|
||||
"browser_navigate",
|
||||
"browser_snapshot",
|
||||
"browser_click",
|
||||
"browser_type",
|
||||
"browser_scroll",
|
||||
"browser_back",
|
||||
"browser_press",
|
||||
"browser_close",
|
||||
"browser_get_images",
|
||||
"browser_vision",
|
||||
],
|
||||
"cronjob_tools": ["schedule_cronjob", "list_cronjobs", "remove_cronjob"],
|
||||
"rl_tools": [
|
||||
"rl_list_environments", "rl_select_environment",
|
||||
"rl_get_current_config", "rl_edit_config",
|
||||
"rl_start_training", "rl_check_status",
|
||||
"rl_stop_training", "rl_get_results",
|
||||
"rl_list_runs", "rl_test_inference"
|
||||
"rl_list_environments",
|
||||
"rl_select_environment",
|
||||
"rl_get_current_config",
|
||||
"rl_edit_config",
|
||||
"rl_start_training",
|
||||
"rl_check_status",
|
||||
"rl_stop_training",
|
||||
"rl_get_results",
|
||||
"rl_list_runs",
|
||||
"rl_test_inference",
|
||||
],
|
||||
"file_tools": ["read_file", "write_file", "patch", "search_files"],
|
||||
"tts_tools": ["text_to_speech"],
|
||||
@@ -161,11 +176,12 @@ _LEGACY_TOOLSET_MAP = {
|
||||
# get_tool_definitions (the main schema provider)
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def get_tool_definitions(
|
||||
enabled_toolsets: List[str] = None,
|
||||
disabled_toolsets: List[str] = None,
|
||||
enabled_toolsets: list[str] = None,
|
||||
disabled_toolsets: list[str] = None,
|
||||
quiet_mode: bool = False,
|
||||
) -> List[Dict[str, Any]]:
|
||||
) -> list[dict[str, Any]]:
|
||||
"""
|
||||
Get tool definitions for model API calls with toolset-based filtering.
|
||||
|
||||
@@ -200,6 +216,7 @@ def get_tool_definitions(
|
||||
|
||||
elif disabled_toolsets:
|
||||
from toolsets import get_all_toolsets
|
||||
|
||||
for ts_name in get_all_toolsets():
|
||||
tools_to_include.update(resolve_toolset(ts_name))
|
||||
|
||||
@@ -219,6 +236,7 @@ def get_tool_definitions(
|
||||
print(f"⚠️ Unknown toolset: {toolset_name}")
|
||||
else:
|
||||
from toolsets import get_all_toolsets
|
||||
|
||||
for ts_name in get_all_toolsets():
|
||||
tools_to_include.update(resolve_toolset(ts_name))
|
||||
|
||||
@@ -230,6 +248,7 @@ def get_tool_definitions(
|
||||
# execute_code" even when the user disabled the web toolset (#560-discord).
|
||||
if "execute_code" in tools_to_include:
|
||||
from tools.code_execution_tool import SANDBOX_ALLOWED_TOOLS, build_execute_code_schema
|
||||
|
||||
sandbox_enabled = SANDBOX_ALLOWED_TOOLS & tools_to_include
|
||||
dynamic_schema = build_execute_code_schema(sandbox_enabled)
|
||||
for i, td in enumerate(filtered_tools):
|
||||
@@ -263,9 +282,9 @@ _AGENT_LOOP_TOOLS = {"todo", "memory", "session_search", "delegate_task"}
|
||||
|
||||
def handle_function_call(
|
||||
function_name: str,
|
||||
function_args: Dict[str, Any],
|
||||
task_id: Optional[str] = None,
|
||||
user_task: Optional[str] = None,
|
||||
function_args: dict[str, Any],
|
||||
task_id: str | None = None,
|
||||
user_task: str | None = None,
|
||||
) -> str:
|
||||
"""
|
||||
Main function call dispatcher that routes calls to the tool registry.
|
||||
@@ -285,13 +304,15 @@ def handle_function_call(
|
||||
|
||||
if function_name == "execute_code":
|
||||
return registry.dispatch(
|
||||
function_name, function_args,
|
||||
function_name,
|
||||
function_args,
|
||||
task_id=task_id,
|
||||
enabled_tools=_last_resolved_tool_names,
|
||||
)
|
||||
|
||||
return registry.dispatch(
|
||||
function_name, function_args,
|
||||
function_name,
|
||||
function_args,
|
||||
task_id=task_id,
|
||||
user_task=user_task,
|
||||
)
|
||||
@@ -306,26 +327,27 @@ def handle_function_call(
|
||||
# Backward-compat wrapper functions
|
||||
# =============================================================================
|
||||
|
||||
def get_all_tool_names() -> List[str]:
|
||||
|
||||
def get_all_tool_names() -> list[str]:
|
||||
"""Return all registered tool names."""
|
||||
return registry.get_all_tool_names()
|
||||
|
||||
|
||||
def get_toolset_for_tool(tool_name: str) -> Optional[str]:
|
||||
def get_toolset_for_tool(tool_name: str) -> str | None:
|
||||
"""Return the toolset a tool belongs to."""
|
||||
return registry.get_toolset_for_tool(tool_name)
|
||||
|
||||
|
||||
def get_available_toolsets() -> Dict[str, dict]:
|
||||
def get_available_toolsets() -> dict[str, dict]:
|
||||
"""Return toolset availability info for UI display."""
|
||||
return registry.get_available_toolsets()
|
||||
|
||||
|
||||
def check_toolset_requirements() -> Dict[str, bool]:
|
||||
def check_toolset_requirements() -> dict[str, bool]:
|
||||
"""Return {toolset: available_bool} for every registered toolset."""
|
||||
return registry.check_toolset_requirements()
|
||||
|
||||
|
||||
def check_tool_availability(quiet: bool = False) -> Tuple[List[str], List[dict]]:
|
||||
def check_tool_availability(quiet: bool = False) -> tuple[list[str], list[dict]]:
|
||||
"""Return (available_toolsets, unavailable_info)."""
|
||||
return registry.check_tool_availability(quiet=quiet)
|
||||
|
||||
@@ -40,7 +40,7 @@ dependencies = [
|
||||
[project.optional-dependencies]
|
||||
modal = ["swe-rex[modal]>=1.4.0"]
|
||||
daytona = ["daytona>=0.148.0"]
|
||||
dev = ["pytest", "pytest-asyncio"]
|
||||
dev = ["pytest", "pytest-asyncio", "mcp>=1.2.0", "ruff", "pre-commit", "watchfiles"]
|
||||
messaging = ["python-telegram-bot>=20.0", "discord.py>=2.0", "aiohttp>=3.9.0", "slack-bolt>=1.18.0", "slack-sdk>=3.27.0"]
|
||||
cron = ["croniter"]
|
||||
slack = ["slack-bolt>=1.18.0", "slack-sdk>=3.27.0"]
|
||||
@@ -76,6 +76,46 @@ py-modules = ["run_agent", "model_tools", "toolsets", "batch_runner", "trajector
|
||||
[tool.setuptools.packages.find]
|
||||
include = ["tools", "hermes_cli", "gateway", "cron", "honcho_integration"]
|
||||
|
||||
[tool.ruff]
|
||||
target-version = "py311"
|
||||
line-length = 120
|
||||
|
||||
[tool.ruff.lint]
|
||||
select = ["E", "F", "W", "I", "UP", "B", "SIM"]
|
||||
ignore = [
|
||||
"E402", # late imports — intentional throughout codebase
|
||||
"E501", # line too long — handled by formatter where it can
|
||||
"E731", # lambda assignments — used in registry pattern
|
||||
"E741", # ambiguous variable name — existing patterns
|
||||
"F811", # redefined unused — intentional overrides
|
||||
"F841", # unused variable — cleanup separately
|
||||
"B007", # unused loop variable — cleanup separately
|
||||
"B904", # raise from — too noisy to gate on
|
||||
"B905", # zip strict — cleanup separately
|
||||
"B027", # empty method without abstract decorator
|
||||
"SIM102", # collapsible if — readability preference
|
||||
"SIM103", # needless bool — readability preference
|
||||
"SIM105", # suppressible exception — existing pattern
|
||||
"SIM108", # ternary — readability preference
|
||||
"SIM110", # reimplemented builtin
|
||||
"SIM112", # uncapitalized env var
|
||||
"SIM115", # open file with context handler
|
||||
"SIM117", # multiple with statements
|
||||
"SIM118", # in-dict-keys — cleanup separately
|
||||
"SIM212", # if-expr twisted arms
|
||||
]
|
||||
|
||||
[tool.ruff.lint.per-file-ignores]
|
||||
"batch_runner.py" = ["F821"]
|
||||
"tools/patch_parser.py" = ["F821"]
|
||||
"gateway/run.py" = ["F821"]
|
||||
"gateway/channel_directory.py" = ["F401"]
|
||||
"hermes_cli/doctor.py" = ["F401"]
|
||||
"tools/image_generation_tool.py" = ["F401"]
|
||||
|
||||
[tool.ruff.lint.isort]
|
||||
known-first-party = ["tools", "hermes_cli", "gateway", "agent", "cron"]
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
testpaths = ["tests"]
|
||||
markers = [
|
||||
|
||||
1571
run_agent.py
1571
run_agent.py
File diff suppressed because it is too large
Load Diff
3
skills/creative/DESCRIPTION.md
Normal file
3
skills/creative/DESCRIPTION.md
Normal file
@@ -0,0 +1,3 @@
|
||||
---
|
||||
description: Creative content generation — ASCII art, hand-drawn style diagrams, and visual design tools.
|
||||
---
|
||||
69
skills/leisure/find-nearby/SKILL.md
Normal file
69
skills/leisure/find-nearby/SKILL.md
Normal file
@@ -0,0 +1,69 @@
|
||||
---
|
||||
name: find-nearby
|
||||
description: Find nearby places (restaurants, cafes, bars, pharmacies, etc.) using OpenStreetMap. Works with coordinates, addresses, cities, zip codes, or Telegram location pins. No API keys needed.
|
||||
version: 1.0.0
|
||||
metadata:
|
||||
hermes:
|
||||
tags: [location, maps, nearby, places, restaurants, local]
|
||||
related_skills: []
|
||||
---
|
||||
|
||||
# Find Nearby — Local Place Discovery
|
||||
|
||||
Find restaurants, cafes, bars, pharmacies, and other places near any location. Uses OpenStreetMap (free, no API keys). Works with:
|
||||
|
||||
- **Coordinates** from Telegram location pins (latitude/longitude in conversation)
|
||||
- **Addresses** ("near 123 Main St, Springfield")
|
||||
- **Cities** ("restaurants in downtown Austin")
|
||||
- **Zip codes** ("pharmacies near 90210")
|
||||
- **Landmarks** ("cafes near Times Square")
|
||||
|
||||
## Quick Reference
|
||||
|
||||
```bash
|
||||
# By coordinates (from Telegram location pin or user-provided)
|
||||
python3 SKILL_DIR/scripts/find_nearby.py --lat <LAT> --lon <LON> --type restaurant --radius 1500
|
||||
|
||||
# By address, city, or landmark (auto-geocoded)
|
||||
python3 SKILL_DIR/scripts/find_nearby.py --near "Times Square, New York" --type cafe
|
||||
|
||||
# Multiple place types
|
||||
python3 SKILL_DIR/scripts/find_nearby.py --near "downtown austin" --type restaurant --type bar --limit 10
|
||||
|
||||
# JSON output
|
||||
python3 SKILL_DIR/scripts/find_nearby.py --near "90210" --type pharmacy --json
|
||||
```
|
||||
|
||||
### Parameters
|
||||
|
||||
| Flag | Description | Default |
|
||||
|------|-------------|---------|
|
||||
| `--lat`, `--lon` | Exact coordinates | — |
|
||||
| `--near` | Address, city, zip, or landmark (geocoded) | — |
|
||||
| `--type` | Place type (repeatable for multiple) | restaurant |
|
||||
| `--radius` | Search radius in meters | 1500 |
|
||||
| `--limit` | Max results | 15 |
|
||||
| `--json` | Machine-readable JSON output | off |
|
||||
|
||||
### Common Place Types
|
||||
|
||||
`restaurant`, `cafe`, `bar`, `pub`, `fast_food`, `pharmacy`, `hospital`, `bank`, `atm`, `fuel`, `parking`, `supermarket`, `convenience`, `hotel`
|
||||
|
||||
## Workflow
|
||||
|
||||
1. **Get the location.** Look for coordinates (`latitude: ... / longitude: ...`) from a Telegram pin, or ask the user for an address/city/zip.
|
||||
|
||||
2. **Ask for preferences** (only if not already stated): place type, how far they're willing to go, any specifics (cuisine, "open now", etc.).
|
||||
|
||||
3. **Run the script** with appropriate flags. Use `--json` if you need to process results programmatically.
|
||||
|
||||
4. **Present results** with names, distances, and Google Maps links. If the user asked about hours or "open now," check the `hours` field in results — if missing or unclear, verify with `web_search`.
|
||||
|
||||
5. **For directions**, use the `directions_url` from results, or construct: `https://www.google.com/maps/dir/?api=1&origin=<LAT>,<LON>&destination=<LAT>,<LON>`
|
||||
|
||||
## Tips
|
||||
|
||||
- If results are sparse, widen the radius (1500 → 3000m)
|
||||
- For "open now" requests: check the `hours` field in results, cross-reference with `web_search` for accuracy since OSM hours aren't always complete
|
||||
- Zip codes alone can be ambiguous globally — prompt the user for country/state if results look wrong
|
||||
- The script uses OpenStreetMap data which is community-maintained; coverage varies by region
|
||||
184
skills/leisure/find-nearby/scripts/find_nearby.py
Normal file
184
skills/leisure/find-nearby/scripts/find_nearby.py
Normal file
@@ -0,0 +1,184 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Find nearby places using OpenStreetMap (Overpass + Nominatim). No API keys needed.
|
||||
|
||||
Usage:
|
||||
# By coordinates
|
||||
python find_nearby.py --lat 36.17 --lon -115.14 --type restaurant --radius 1500
|
||||
|
||||
# By address/city/zip (auto-geocoded)
|
||||
python find_nearby.py --near "Times Square, New York" --type cafe --radius 1000
|
||||
python find_nearby.py --near "90210" --type pharmacy
|
||||
|
||||
# Multiple types
|
||||
python find_nearby.py --lat 36.17 --lon -115.14 --type restaurant --type bar
|
||||
|
||||
# JSON output for programmatic use
|
||||
python find_nearby.py --near "downtown las vegas" --type restaurant --json
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import math
|
||||
import sys
|
||||
import urllib.parse
|
||||
import urllib.request
|
||||
from typing import Any
|
||||
|
||||
OVERPASS_URLS = [
|
||||
"https://overpass-api.de/api/interpreter",
|
||||
"https://overpass.kumi.systems/api/interpreter",
|
||||
]
|
||||
NOMINATIM_URL = "https://nominatim.openstreetmap.org/search"
|
||||
USER_AGENT = "HermesAgent/1.0 (find-nearby skill)"
|
||||
TIMEOUT = 15
|
||||
|
||||
|
||||
def _http_get(url: str) -> Any:
|
||||
req = urllib.request.Request(url, headers={"User-Agent": USER_AGENT})
|
||||
with urllib.request.urlopen(req, timeout=TIMEOUT) as r:
|
||||
return json.loads(r.read())
|
||||
|
||||
|
||||
def _http_post(url: str, data: str) -> Any:
|
||||
req = urllib.request.Request(
|
||||
url, data=data.encode(), headers={"User-Agent": USER_AGENT}
|
||||
)
|
||||
with urllib.request.urlopen(req, timeout=TIMEOUT) as r:
|
||||
return json.loads(r.read())
|
||||
|
||||
|
||||
def haversine(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
|
||||
"""Distance in meters between two coordinates."""
|
||||
R = 6_371_000
|
||||
rlat1, rlat2 = math.radians(lat1), math.radians(lat2)
|
||||
dlat = math.radians(lat2 - lat1)
|
||||
dlon = math.radians(lon2 - lon1)
|
||||
a = math.sin(dlat / 2) ** 2 + math.cos(rlat1) * math.cos(rlat2) * math.sin(dlon / 2) ** 2
|
||||
return R * 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
|
||||
|
||||
|
||||
def geocode(query: str) -> tuple[float, float]:
|
||||
"""Convert address/city/zip to coordinates via Nominatim."""
|
||||
params = urllib.parse.urlencode({"q": query, "format": "json", "limit": 1})
|
||||
results = _http_get(f"{NOMINATIM_URL}?{params}")
|
||||
if not results:
|
||||
print(f"Error: Could not geocode '{query}'. Try a more specific address.", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
return float(results[0]["lat"]), float(results[0]["lon"])
|
||||
|
||||
|
||||
def find_nearby(lat: float, lon: float, types: list[str], radius: int = 1500, limit: int = 15) -> list[dict]:
|
||||
"""Query Overpass for nearby amenities."""
|
||||
# Build Overpass QL query
|
||||
type_filters = "".join(
|
||||
f'nwr["amenity"="{t}"](around:{radius},{lat},{lon});' for t in types
|
||||
)
|
||||
query = f"[out:json][timeout:{TIMEOUT}];({type_filters});out center tags;"
|
||||
|
||||
# Try each Overpass server
|
||||
data = None
|
||||
for url in OVERPASS_URLS:
|
||||
try:
|
||||
data = _http_post(url, f"data={urllib.parse.quote(query)}")
|
||||
break
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
if not data:
|
||||
return []
|
||||
|
||||
# Parse results
|
||||
places = []
|
||||
for el in data.get("elements", []):
|
||||
tags = el.get("tags", {})
|
||||
name = tags.get("name")
|
||||
if not name:
|
||||
continue
|
||||
|
||||
# Get coordinates (nodes have lat/lon directly, ways/relations use center)
|
||||
plat = el.get("lat") or (el.get("center", {}) or {}).get("lat")
|
||||
plon = el.get("lon") or (el.get("center", {}) or {}).get("lon")
|
||||
if not plat or not plon:
|
||||
continue
|
||||
|
||||
dist = haversine(lat, lon, plat, plon)
|
||||
|
||||
place = {
|
||||
"name": name,
|
||||
"type": tags.get("amenity", ""),
|
||||
"distance_m": round(dist),
|
||||
"lat": plat,
|
||||
"lon": plon,
|
||||
"maps_url": f"https://www.google.com/maps/search/?api=1&query={plat},{plon}",
|
||||
"directions_url": f"https://www.google.com/maps/dir/?api=1&origin={lat},{lon}&destination={plat},{plon}",
|
||||
}
|
||||
|
||||
# Add useful optional fields
|
||||
if tags.get("cuisine"):
|
||||
place["cuisine"] = tags["cuisine"]
|
||||
if tags.get("opening_hours"):
|
||||
place["hours"] = tags["opening_hours"]
|
||||
if tags.get("phone"):
|
||||
place["phone"] = tags["phone"]
|
||||
if tags.get("website"):
|
||||
place["website"] = tags["website"]
|
||||
if tags.get("addr:street"):
|
||||
addr_parts = [tags.get("addr:housenumber", ""), tags.get("addr:street", "")]
|
||||
if tags.get("addr:city"):
|
||||
addr_parts.append(tags["addr:city"])
|
||||
place["address"] = " ".join(p for p in addr_parts if p)
|
||||
|
||||
places.append(place)
|
||||
|
||||
# Sort by distance, limit results
|
||||
places.sort(key=lambda p: p["distance_m"])
|
||||
return places[:limit]
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Find nearby places via OpenStreetMap")
|
||||
parser.add_argument("--lat", type=float, help="Latitude")
|
||||
parser.add_argument("--lon", type=float, help="Longitude")
|
||||
parser.add_argument("--near", type=str, help="Address, city, or zip code (geocoded automatically)")
|
||||
parser.add_argument("--type", action="append", dest="types", default=[], help="Place type (restaurant, cafe, bar, pharmacy, etc.)")
|
||||
parser.add_argument("--radius", type=int, default=1500, help="Search radius in meters (default: 1500)")
|
||||
parser.add_argument("--limit", type=int, default=15, help="Max results (default: 15)")
|
||||
parser.add_argument("--json", action="store_true", dest="json_output", help="Output as JSON")
|
||||
args = parser.parse_args()
|
||||
|
||||
# Resolve coordinates
|
||||
if args.near:
|
||||
lat, lon = geocode(args.near)
|
||||
elif args.lat is not None and args.lon is not None:
|
||||
lat, lon = args.lat, args.lon
|
||||
else:
|
||||
print("Error: Provide --lat/--lon or --near", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
if not args.types:
|
||||
args.types = ["restaurant"]
|
||||
|
||||
places = find_nearby(lat, lon, args.types, args.radius, args.limit)
|
||||
|
||||
if args.json_output:
|
||||
print(json.dumps({"origin": {"lat": lat, "lon": lon}, "results": places, "count": len(places)}, indent=2))
|
||||
else:
|
||||
if not places:
|
||||
print(f"No {'/'.join(args.types)} found within {args.radius}m")
|
||||
return
|
||||
print(f"Found {len(places)} places within {args.radius}m:\n")
|
||||
for i, p in enumerate(places, 1):
|
||||
dist_str = f"{p['distance_m']}m" if p["distance_m"] < 1000 else f"{p['distance_m']/1000:.1f}km"
|
||||
print(f" {i}. {p['name']} ({p['type']}) — {dist_str}")
|
||||
if p.get("cuisine"):
|
||||
print(f" Cuisine: {p['cuisine']}")
|
||||
if p.get("hours"):
|
||||
print(f" Hours: {p['hours']}")
|
||||
if p.get("address"):
|
||||
print(f" Address: {p['address']}")
|
||||
print(f" Map: {p['maps_url']}")
|
||||
print()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -321,6 +321,32 @@ mcp_servers:
|
||||
|
||||
All tools from all servers are registered and available simultaneously. Each server's tools are prefixed with its name to avoid collisions.
|
||||
|
||||
## Sampling (Server-Initiated LLM Requests)
|
||||
|
||||
Hermes supports MCP's `sampling/createMessage` capability — MCP servers can request LLM completions through the agent during tool execution. This enables agent-in-the-loop workflows (data analysis, content generation, decision-making).
|
||||
|
||||
Sampling is **enabled by default**. Configure per server:
|
||||
|
||||
```yaml
|
||||
mcp_servers:
|
||||
my_server:
|
||||
command: "npx"
|
||||
args: ["-y", "my-mcp-server"]
|
||||
sampling:
|
||||
enabled: true # default: true
|
||||
model: "gemini-3-flash" # model override (optional)
|
||||
max_tokens_cap: 4096 # max tokens per request
|
||||
timeout: 30 # LLM call timeout (seconds)
|
||||
max_rpm: 10 # max requests per minute
|
||||
allowed_models: [] # model whitelist (empty = all)
|
||||
max_tool_rounds: 5 # tool loop limit (0 = disable)
|
||||
log_level: "info" # audit verbosity
|
||||
```
|
||||
|
||||
Servers can also include `tools` in sampling requests for multi-turn tool-augmented workflows. The `max_tool_rounds` config prevents infinite tool loops. Per-server audit metrics (requests, errors, tokens, tool use count) are tracked via `get_mcp_status()`.
|
||||
|
||||
Disable sampling for untrusted servers with `sampling: { enabled: false }`.
|
||||
|
||||
## Notes
|
||||
|
||||
- MCP tools are called synchronously from the agent's perspective but run asynchronously on a dedicated background event loop
|
||||
|
||||
@@ -1 +1,3 @@
|
||||
Media content extraction and transformation tools — YouTube transcripts, audio, video processing.
|
||||
---
|
||||
description: Skills for working with media content — YouTube transcripts, GIF search, music generation, and audio visualization.
|
||||
---
|
||||
|
||||
3
skills/mlops/cloud/DESCRIPTION.md
Normal file
3
skills/mlops/cloud/DESCRIPTION.md
Normal file
@@ -0,0 +1,3 @@
|
||||
---
|
||||
description: GPU cloud providers and serverless compute platforms for ML workloads.
|
||||
---
|
||||
3
skills/mlops/evaluation/DESCRIPTION.md
Normal file
3
skills/mlops/evaluation/DESCRIPTION.md
Normal file
@@ -0,0 +1,3 @@
|
||||
---
|
||||
description: Model evaluation benchmarks, experiment tracking, data curation, tokenizers, and interpretability tools.
|
||||
---
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user