Mirror of https://github.com/NousResearch/hermes-agent.git, synced 2026-05-02 08:47:26 +08:00

Compare commits: hermes/sla...fix/1056-d (11 commits)

232ba441d7
34e120bcbb
779f8df6a6
62abb453d3
735a6e7651
e5ddca1c8b
fd0e1aac72
8ccd14a0d4
6c611c852e
f882dabf19
df9020dfa3
@@ -497,6 +497,66 @@ def convert_tools_to_anthropic(tools: List[Dict]) -> List[Dict]:
     return result
 
 
+def _image_source_from_openai_url(url: str) -> Dict[str, str]:
+    """Convert an OpenAI-style image URL/data URL into Anthropic image source."""
+    url = str(url or "").strip()
+    if not url:
+        return {"type": "url", "url": ""}
+
+    if url.startswith("data:"):
+        header, _, data = url.partition(",")
+        media_type = "image/jpeg"
+        if header.startswith("data:"):
+            mime_part = header[len("data:"):].split(";", 1)[0].strip()
+            if mime_part.startswith("image/"):
+                media_type = mime_part
+        return {
+            "type": "base64",
+            "media_type": media_type,
+            "data": data,
+        }
+
+    return {"type": "url", "url": url}
+
+
+def _convert_content_part_to_anthropic(part: Any) -> Optional[Dict[str, Any]]:
+    """Convert a single OpenAI-style content part to Anthropic format."""
+    if part is None:
+        return None
+    if isinstance(part, str):
+        return {"type": "text", "text": part}
+    if not isinstance(part, dict):
+        return {"type": "text", "text": str(part)}
+
+    ptype = part.get("type")
+
+    if ptype == "input_text":
+        block: Dict[str, Any] = {"type": "text", "text": part.get("text", "")}
+    elif ptype in {"image_url", "input_image"}:
+        image_value = part.get("image_url", {})
+        url = image_value.get("url", "") if isinstance(image_value, dict) else str(image_value or "")
+        block = {"type": "image", "source": _image_source_from_openai_url(url)}
+    else:
+        block = dict(part)
+
+    if isinstance(part.get("cache_control"), dict) and "cache_control" not in block:
+        block["cache_control"] = dict(part["cache_control"])
+    return block
+
+
+def _convert_content_to_anthropic(content: Any) -> Any:
+    """Convert OpenAI-style multimodal content arrays to Anthropic blocks."""
+    if not isinstance(content, list):
+        return content
+
+    converted = []
+    for part in content:
+        block = _convert_content_part_to_anthropic(part)
+        if block is not None:
+            converted.append(block)
+    return converted
+
+
 def convert_messages_to_anthropic(
     messages: List[Dict],
 ) -> Tuple[Optional[Any], List[Dict]]:
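Taken together, these helpers map OpenAI-style image parts onto Anthropic's two image source shapes. A minimal usage sketch, assuming the helpers live in agent/anthropic_adapter.py alongside convert_messages_to_anthropic (the module path is inferred from imports elsewhere in this diff):

    # Sketch only: module path assumed, not confirmed by this diff.
    from agent.anthropic_adapter import _image_source_from_openai_url

    # Remote images pass through as URL sources.
    _image_source_from_openai_url("https://example.com/cat.png")
    # -> {"type": "url", "url": "https://example.com/cat.png"}

    # Data URLs are split into a media type and the raw base64 payload.
    _image_source_from_openai_url("data:image/png;base64,AAAA")
    # -> {"type": "base64", "media_type": "image/png", "data": "AAAA"}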
@@ -533,11 +593,9 @@ def convert_messages_to_anthropic(
         blocks = []
         if content:
             if isinstance(content, list):
-                for part in content:
-                    if isinstance(part, dict):
-                        blocks.append(dict(part))
-                    elif part is not None:
-                        blocks.append({"type": "text", "text": str(part)})
+                converted_content = _convert_content_to_anthropic(content)
+                if isinstance(converted_content, list):
+                    blocks.extend(converted_content)
             else:
                 blocks.append({"type": "text", "text": str(content)})
         for tc in m.get("tool_calls", []):
@@ -587,12 +645,11 @@ def convert_messages_to_anthropic(
 
         # Regular user message
         if isinstance(content, list):
-            converted_blocks = []
-            for part in content:
-                converted = _convert_user_content_part_to_anthropic(part)
-                if converted is not None:
-                    converted_blocks.append(converted)
-            result.append({"role": "user", "content": converted_blocks or [{"type": "text", "text": ""}]})
+            converted_blocks = _convert_content_to_anthropic(content)
+            result.append({
+                "role": "user",
+                "content": converted_blocks or [{"type": "text", "text": ""}],
+            })
         else:
             result.append({"role": "user", "content": content})
 
@@ -83,7 +83,10 @@ _AUTH_JSON_PATH = get_hermes_home() / "auth.json"
 
 # Codex fallback: uses the Responses API (the only endpoint the Codex
 # OAuth token can access) with a fast model for auxiliary tasks.
-_CODEX_AUX_MODEL = "gpt-5.3-codex"
+# ChatGPT-backed Codex accounts currently reject gpt-5.3-codex for these
+# auxiliary flows, while gpt-5.2-codex remains broadly available and supports
+# vision via Responses.
+_CODEX_AUX_MODEL = "gpt-5.2-codex"
 _CODEX_AUX_BASE_URL = "https://chatgpt.com/backend-api/codex"
 
 
@@ -321,25 +321,32 @@ def build_session_key(source: SessionSource) -> str:
     This is the single source of truth for session key construction.
 
     DM rules:
-    - WhatsApp DMs include chat_id (multi-user support).
-    - Other DMs include thread_id when present (e.g. Slack threaded DMs),
-      so each DM thread gets its own session while top-level DMs share one.
-    - Without thread_id or chat_id, all DMs share a single session.
+    - DMs include chat_id when present, so each private conversation is isolated.
+    - thread_id further differentiates threaded DMs within the same DM chat.
+    - Without chat_id, thread_id is used as a best-effort fallback.
+    - Without thread_id or chat_id, DMs share a single session.
 
     Group/channel rules:
-    - thread_id differentiates threads within a channel.
-    - Without thread_id, all messages in a channel share one session.
+    - chat_id identifies the parent group/channel.
+    - thread_id differentiates threads within that parent chat.
+    - Without identifiers, messages fall back to one session per platform/chat_type.
     """
     platform = source.platform.value
     if source.chat_type == "dm":
+        if source.chat_id:
+            if source.thread_id:
+                return f"agent:main:{platform}:dm:{source.chat_id}:{source.thread_id}"
+            return f"agent:main:{platform}:dm:{source.chat_id}"
         if source.thread_id:
             return f"agent:main:{platform}:dm:{source.thread_id}"
-        if platform == "whatsapp" and source.chat_id:
-            return f"agent:main:{platform}:dm:{source.chat_id}"
         return f"agent:main:{platform}:dm"
+    if source.chat_id:
+        if source.thread_id:
+            return f"agent:main:{platform}:{source.chat_type}:{source.chat_id}:{source.thread_id}"
+        return f"agent:main:{platform}:{source.chat_type}:{source.chat_id}"
     if source.thread_id:
-        return f"agent:main:{platform}:{source.chat_type}:{source.chat_id}:{source.thread_id}"
-    return f"agent:main:{platform}:{source.chat_type}:{source.chat_id}"
+        return f"agent:main:{platform}:{source.chat_type}:{source.thread_id}"
     return f"agent:main:{platform}:{source.chat_type}"
 
 
 class SessionStore:
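Concretely, the new rules produce keys of the following shapes (a sketch based on the code above and the tests further down; the threaded-DM call assumes SessionSource also accepts a thread_id field, since build_session_key reads one):

    # Key shapes under the new rules (first value mirrors the tests below).
    build_session_key(SessionSource(platform=Platform.TELEGRAM, chat_id="99", chat_type="dm"))
    # -> "agent:main:telegram:dm:99"

    # Hypothetical threaded DM: chat_id plus thread_id.
    build_session_key(SessionSource(platform=Platform.TELEGRAM, chat_id="99", chat_type="dm", thread_id="7"))
    # -> "agent:main:telegram:dm:99:7"

    # Group/channel: chat_type and chat_id, with thread_id appended when present.
    build_session_key(SessionSource(platform=Platform.TELEGRAM, chat_id="c1", chat_type="group", thread_id="t9"))
    # -> "agent:main:telegram:group:c1:t9"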
@@ -2032,6 +2032,16 @@ def _resolve_stash_selector(git_cmd: list[str], cwd: Path, stash_ref: str) -> Op
 
 
+def _print_stash_cleanup_guidance(stash_ref: str, stash_selector: Optional[str] = None) -> None:
+    print("  Check `git status` first so you don't accidentally reapply the same change twice.")
+    print("  Find the saved entry with: git stash list --format='%gd %H %s'")
+    if stash_selector:
+        print(f"  Remove it with: git stash drop {stash_selector}")
+    else:
+        print(f"  Look for commit {stash_ref}, then drop its selector with: git stash drop stash@{{N}}")
+
+
 def _restore_stashed_changes(
     git_cmd: list[str],
     cwd: Path,
@@ -2072,7 +2082,7 @@ def _restore_stashed_changes(
         if stash_selector is None:
             print("⚠ Local changes were restored, but Hermes couldn't find the stash entry to drop.")
             print("  The stash was left in place. You can remove it manually after checking the result.")
-            print(f"  Look for commit {stash_ref} in `git stash list --format='%gd %H'` and drop that selector.")
+            _print_stash_cleanup_guidance(stash_ref)
         else:
             drop = subprocess.run(
                 git_cmd + ["stash", "drop", stash_selector],
@@ -2087,7 +2097,7 @@ def _restore_stashed_changes(
             if drop.stderr.strip():
                 print(drop.stderr.strip())
             print("  The stash was left in place. You can remove it manually after checking the result.")
-            print(f"  If needed: git stash drop {stash_selector}")
+            _print_stash_cleanup_guidance(stash_ref, stash_selector)
 
     print("⚠ Local changes were restored on top of the updated codebase.")
     print("  Review `git diff` / `git status` if Hermes behaves unexpectedly.")
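For reference, this is what the helper prints in each mode (derived directly from the print calls above; "abc123" and "stash@{2}" are the sample values the tests use):

    >>> _print_stash_cleanup_guidance("abc123", "stash@{2}")
      Check `git status` first so you don't accidentally reapply the same change twice.
      Find the saved entry with: git stash list --format='%gd %H %s'
      Remove it with: git stash drop stash@{2}
    >>> _print_stash_cleanup_guidance("abc123")
      Check `git status` first so you don't accidentally reapply the same change twice.
      Find the saved entry with: git stash list --format='%gd %H %s'
      Look for commit abc123, then drop its selector with: git stash drop stash@{N}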
run_agent.py
@@ -21,6 +21,8 @@ Usage:
 """
 
+import atexit
 import asyncio
+import base64
 import concurrent.futures
 import copy
 import hashlib
@@ -31,6 +33,7 @@ import os
 import random
 import re
 import sys
+import tempfile
 import time
 import threading
 import weakref
@@ -504,6 +507,11 @@ class AIAgent:
         self._persist_user_message_idx = None
         self._persist_user_message_override = None
 
+        # Cache anthropic image-to-text fallbacks per image payload/URL so a
+        # single tool loop does not repeatedly re-run auxiliary vision on the
+        # same image history.
+        self._anthropic_image_fallback_cache: Dict[str, str] = {}
+
         # Initialize LLM client via centralized provider router.
         # The router handles auth resolution, base URL, headers, and
         # Codex/Anthropic wrapping for all known providers.
@@ -3034,13 +3042,156 @@ class AIAgent:
 
     # ── End provider fallback ──────────────────────────────────────────────
 
+    @staticmethod
+    def _content_has_image_parts(content: Any) -> bool:
+        if not isinstance(content, list):
+            return False
+        for part in content:
+            if isinstance(part, dict) and part.get("type") in {"image_url", "input_image"}:
+                return True
+        return False
+
+    @staticmethod
+    def _materialize_data_url_for_vision(image_url: str) -> tuple[str, Optional[Path]]:
+        header, _, data = str(image_url or "").partition(",")
+        mime = "image/jpeg"
+        if header.startswith("data:"):
+            mime_part = header[len("data:"):].split(";", 1)[0].strip()
+            if mime_part.startswith("image/"):
+                mime = mime_part
+        suffix = {
+            "image/png": ".png",
+            "image/gif": ".gif",
+            "image/webp": ".webp",
+            "image/jpeg": ".jpg",
+            "image/jpg": ".jpg",
+        }.get(mime, ".jpg")
+        tmp = tempfile.NamedTemporaryFile(prefix="anthropic_image_", suffix=suffix, delete=False)
+        with tmp:
+            tmp.write(base64.b64decode(data))
+        path = Path(tmp.name)
+        return str(path), path
+
+    def _describe_image_for_anthropic_fallback(self, image_url: str, role: str) -> str:
+        cache_key = hashlib.sha256(str(image_url or "").encode("utf-8")).hexdigest()
+        cached = self._anthropic_image_fallback_cache.get(cache_key)
+        if cached:
+            return cached
+
+        role_label = {
+            "assistant": "assistant",
+            "tool": "tool result",
+        }.get(role, "user")
+        analysis_prompt = (
+            "Describe everything visible in this image in thorough detail. "
+            "Include any text, code, UI, data, objects, people, layout, colors, "
+            "and any other notable visual information."
+        )
+
+        vision_source = str(image_url or "")
+        cleanup_path: Optional[Path] = None
+        if vision_source.startswith("data:"):
+            vision_source, cleanup_path = self._materialize_data_url_for_vision(vision_source)
+
+        description = ""
+        try:
+            from tools.vision_tools import vision_analyze_tool
+
+            result_json = asyncio.run(
+                vision_analyze_tool(image_url=vision_source, user_prompt=analysis_prompt)
+            )
+            result = json.loads(result_json) if isinstance(result_json, str) else {}
+            description = (result.get("analysis") or "").strip()
+        except Exception as e:
+            description = f"Image analysis failed: {e}"
+        finally:
+            if cleanup_path and cleanup_path.exists():
+                try:
+                    cleanup_path.unlink()
+                except OSError:
+                    pass
+
+        if not description:
+            description = "Image analysis failed."
+
+        note = f"[The {role_label} attached an image. Here's what it contains:\n{description}]"
+        if vision_source and not str(image_url or "").startswith("data:"):
+            note += (
+                f"\n[If you need a closer look, use vision_analyze with image_url: {vision_source}]"
+            )
+
+        self._anthropic_image_fallback_cache[cache_key] = note
+        return note
+
+    def _preprocess_anthropic_content(self, content: Any, role: str) -> Any:
+        if not self._content_has_image_parts(content):
+            return content
+
+        text_parts: List[str] = []
+        image_notes: List[str] = []
+        for part in content:
+            if isinstance(part, str):
+                if part.strip():
+                    text_parts.append(part.strip())
+                continue
+            if not isinstance(part, dict):
+                continue
+
+            ptype = part.get("type")
+            if ptype in {"text", "input_text"}:
+                text = str(part.get("text", "") or "").strip()
+                if text:
+                    text_parts.append(text)
+                continue
+
+            if ptype in {"image_url", "input_image"}:
+                image_data = part.get("image_url", {})
+                image_url = image_data.get("url", "") if isinstance(image_data, dict) else str(image_data or "")
+                if image_url:
+                    image_notes.append(self._describe_image_for_anthropic_fallback(image_url, role))
+                else:
+                    image_notes.append("[An image was attached but no image source was available.]")
+                continue
+
+            text = str(part.get("text", "") or "").strip()
+            if text:
+                text_parts.append(text)
+
+        prefix = "\n\n".join(note for note in image_notes if note).strip()
+        suffix = "\n".join(text for text in text_parts if text).strip()
+        if prefix and suffix:
+            return f"{prefix}\n\n{suffix}"
+        if prefix:
+            return prefix
+        if suffix:
+            return suffix
+        return "[A multimodal message was converted to text for Anthropic compatibility.]"
+
+    def _prepare_anthropic_messages_for_api(self, api_messages: list) -> list:
+        if not any(
+            isinstance(msg, dict) and self._content_has_image_parts(msg.get("content"))
+            for msg in api_messages
+        ):
+            return api_messages
+
+        transformed = copy.deepcopy(api_messages)
+        for msg in transformed:
+            if not isinstance(msg, dict):
+                continue
+            msg["content"] = self._preprocess_anthropic_content(
+                msg.get("content"),
+                str(msg.get("role", "user") or "user"),
+            )
+        return transformed
+
     def _build_api_kwargs(self, api_messages: list) -> dict:
         """Build the keyword arguments dict for the active API mode."""
         if self.api_mode == "anthropic_messages":
             from agent.anthropic_adapter import build_anthropic_kwargs
+            anthropic_messages = self._prepare_anthropic_messages_for_api(api_messages)
             return build_anthropic_kwargs(
                 model=self.model,
-                messages=api_messages,
+                messages=anthropic_messages,
                 tools=self.tools,
                 max_tokens=self.max_tokens,
                 reasoning_config=self.reasoning_config,
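End to end, the fallback rewrites a multimodal message into plain text before the Anthropic adapter sees it. A sketch of the transformation (the description text is hypothetical; the bracketed framing comes from the templates above):

    # Input, OpenAI-style:
    msgs = [{"role": "user", "content": [
        {"type": "text", "text": "Can you see this now?"},
        {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
    ]}]

    flat = agent._prepare_anthropic_messages_for_api(msgs)

    # flat[0]["content"] is now a single string, roughly:
    #   [The user attached an image. Here's what it contains:
    #   A cat sitting on a chair.]
    #   [If you need a closer look, use vision_analyze with image_url: https://example.com/cat.png]
    #
    #   Can you see this now?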
@@ -195,7 +195,7 @@ class TestGetTextAuxiliaryClient:
         with patch("agent.auxiliary_client._read_nous_auth", return_value=None), \
             patch("agent.auxiliary_client.OpenAI") as mock_openai:
             client, model = get_text_auxiliary_client()
-            assert model == "gpt-5.3-codex"
+            assert model == "gpt-5.2-codex"
             # Returns a CodexAuxiliaryClient wrapper, not a raw OpenAI client
             from agent.auxiliary_client import CodexAuxiliaryClient
             assert isinstance(client, CodexAuxiliaryClient)
@@ -288,7 +288,7 @@ class TestVisionClientFallback:
             client, model = get_vision_auxiliary_client()
             from agent.auxiliary_client import CodexAuxiliaryClient
             assert isinstance(client, CodexAuxiliaryClient)
-            assert model == "gpt-5.3-codex"
+            assert model == "gpt-5.2-codex"
 
     def test_vision_auto_falls_back_to_custom_endpoint(self, monkeypatch):
         """Custom endpoint is used as fallback in vision auto mode.
@@ -371,7 +371,7 @@ class TestVisionClientFallback:
             client, model = get_vision_auxiliary_client()
             from agent.auxiliary_client import CodexAuxiliaryClient
             assert isinstance(client, CodexAuxiliaryClient)
-            assert model == "gpt-5.3-codex"
+            assert model == "gpt-5.2-codex"
 
 
 class TestGetAuxiliaryProvider:
@@ -489,7 +489,7 @@ class TestResolveForcedProvider:
             client, model = _resolve_forced_provider("main")
             from agent.auxiliary_client import CodexAuxiliaryClient
             assert isinstance(client, CodexAuxiliaryClient)
-            assert model == "gpt-5.3-codex"
+            assert model == "gpt-5.2-codex"
 
     def test_forced_codex(self, codex_auth_dir, monkeypatch):
         with patch("agent.auxiliary_client._read_nous_auth", return_value=None), \
@@ -497,7 +497,7 @@ class TestResolveForcedProvider:
             client, model = _resolve_forced_provider("codex")
             from agent.auxiliary_client import CodexAuxiliaryClient
             assert isinstance(client, CodexAuxiliaryClient)
-            assert model == "gpt-5.3-codex"
+            assert model == "gpt-5.2-codex"
 
     def test_forced_codex_no_token(self, monkeypatch):
         with patch("agent.auxiliary_client._read_codex_access_token", return_value=None):
@@ -50,11 +50,11 @@ class TestInterruptKeyConsistency:
    """Ensure adapter interrupt methods are queried with session_key, not chat_id."""
 
     def test_session_key_differs_from_chat_id_for_dm(self):
-        """Session key for a DM is NOT the same as chat_id."""
+        """Session key for a DM is namespaced and includes the DM chat_id."""
         source = _source("123456", "dm")
         session_key = build_session_key(source)
         assert session_key != source.chat_id
-        assert session_key == "agent:main:telegram:dm"
+        assert session_key == "agent:main:telegram:dm:123456"
 
     def test_session_key_differs_from_chat_id_for_group(self):
         """Session key for a group chat includes prefix, unlike raw chat_id."""
@@ -338,7 +338,7 @@ class TestSessionStoreRewriteTranscript:
 
 class TestWhatsAppDMSessionKeyConsistency:
     """Regression: all session-key construction must go through build_session_key
-    so WhatsApp DMs include chat_id while other DMs do not."""
+    so DMs are isolated by chat_id across platforms."""
 
     @pytest.fixture()
     def store(self, tmp_path):
@@ -369,15 +369,24 @@ class TestWhatsAppDMSessionKeyConsistency:
         )
         assert store._generate_session_key(source) == build_session_key(source)
 
-    def test_telegram_dm_omits_chat_id(self):
-        """Non-WhatsApp DMs should still omit chat_id (single owner DM)."""
+    def test_telegram_dm_includes_chat_id(self):
+        """Non-WhatsApp DMs should also include chat_id to separate users."""
         source = SessionSource(
             platform=Platform.TELEGRAM,
             chat_id="99",
             chat_type="dm",
         )
         key = build_session_key(source)
-        assert key == "agent:main:telegram:dm"
+        assert key == "agent:main:telegram:dm:99"
+
+    def test_distinct_dm_chat_ids_get_distinct_session_keys(self):
+        """Different DM chats must not collapse into one shared session."""
+        first = SessionSource(platform=Platform.TELEGRAM, chat_id="99", chat_type="dm")
+        second = SessionSource(platform=Platform.TELEGRAM, chat_id="100", chat_type="dm")
+
+        assert build_session_key(first) == "agent:main:telegram:dm:99"
+        assert build_session_key(second) == "agent:main:telegram:dm:100"
+        assert build_session_key(first) != build_session_key(second)
 
     def test_discord_group_includes_chat_id(self):
         """Group/channel keys include chat_type and chat_id."""
@@ -134,6 +134,16 @@ def test_restore_stashed_changes_applies_without_prompt_when_disabled(monkeypatc
 
 
+def test_print_stash_cleanup_guidance_with_selector(capsys):
+    hermes_main._print_stash_cleanup_guidance("abc123", "stash@{2}")
+
+    out = capsys.readouterr().out
+    assert "Check `git status` first" in out
+    assert "git stash list --format='%gd %H %s'" in out
+    assert "git stash drop stash@{2}" in out
+
+
 def test_restore_stashed_changes_keeps_going_when_stash_entry_cannot_be_resolved(monkeypatch, tmp_path, capsys):
     calls = []
@@ -157,6 +167,8 @@ def test_restore_stashed_changes_keeps_going_when_stash_entry_cannot_be_resolved
     out = capsys.readouterr().out
     assert "couldn't find the stash entry to drop" in out
     assert "stash was left in place" in out
+    assert "Check `git status` first" in out
+    assert "git stash list --format='%gd %H %s'" in out
     assert "Look for commit abc123" in out
@@ -183,6 +195,8 @@ def test_restore_stashed_changes_keeps_going_when_drop_fails(monkeypatch, tmp_pa
     out = capsys.readouterr().out
     assert "couldn't drop the saved stash entry" in out
     assert "drop failed" in out
+    assert "Check `git status` first" in out
+    assert "git stash list --format='%gd %H %s'" in out
     assert "git stash drop stash@{0}" in out
@@ -495,6 +495,59 @@ class TestConvertMessages:
         assert len(result) == 1
         assert result[0]["role"] == "user"
 
+    def test_converts_user_image_url_blocks_to_anthropic_image_blocks(self):
+        messages = [
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": "Can you see this?"},
+                    {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
+                ],
+            }
+        ]
+
+        _, result = convert_messages_to_anthropic(messages)
+
+        assert result == [
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": "Can you see this?"},
+                    {"type": "image", "source": {"type": "url", "url": "https://example.com/cat.png"}},
+                ],
+            }
+        ]
+
+    def test_converts_data_url_image_blocks_to_base64_anthropic_image_blocks(self):
+        messages = [
+            {
+                "role": "user",
+                "content": [
+                    {"type": "input_text", "text": "What is in this screenshot?"},
+                    {"type": "input_image", "image_url": "data:image/png;base64,AAAA"},
+                ],
+            }
+        ]
+
+        _, result = convert_messages_to_anthropic(messages)
+
+        assert result == [
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": "What is in this screenshot?"},
+                    {
+                        "type": "image",
+                        "source": {
+                            "type": "base64",
+                            "media_type": "image/png",
+                            "data": "AAAA",
+                        },
+                    },
+                ],
+            }
+        ]
+
     def test_converts_tool_calls(self):
         messages = [
             {
@@ -543,7 +543,7 @@ class TestAuxiliaryClientProviderPriority:
             patch("agent.auxiliary_client._read_codex_access_token", return_value="codex-tok"), \
             patch("agent.auxiliary_client.OpenAI"):
             client, model = get_text_auxiliary_client()
-        assert model == "gpt-5.3-codex"
+        assert model == "gpt-5.2-codex"
         assert isinstance(client, CodexAuxiliaryClient)
@@ -12,7 +12,7 @@ import uuid
 from logging.handlers import RotatingFileHandler
 from pathlib import Path
 from types import SimpleNamespace
-from unittest.mock import MagicMock, patch
+from unittest.mock import AsyncMock, MagicMock, patch
 
 import pytest
@@ -1986,6 +1986,69 @@ class TestBuildApiKwargsAnthropicMaxTokens:
         assert call_args[0][3] is None
 
 
+class TestAnthropicImageFallback:
+    def test_build_api_kwargs_converts_multimodal_user_image_to_text(self, agent):
+        agent.api_mode = "anthropic_messages"
+        agent.reasoning_config = None
+
+        api_messages = [{
+            "role": "user",
+            "content": [
+                {"type": "text", "text": "Can you see this now?"},
+                {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
+            ],
+        }]
+
+        with (
+            patch("tools.vision_tools.vision_analyze_tool", new=AsyncMock(return_value=json.dumps({"success": True, "analysis": "A cat sitting on a chair."}))),
+            patch("agent.anthropic_adapter.build_anthropic_kwargs") as mock_build,
+        ):
+            mock_build.return_value = {"model": "claude-sonnet-4-20250514", "messages": [], "max_tokens": 4096}
+            agent._build_api_kwargs(api_messages)
+
+        kwargs = mock_build.call_args.kwargs or dict(zip(
+            ["model", "messages", "tools", "max_tokens", "reasoning_config"],
+            mock_build.call_args.args,
+        ))
+        transformed = kwargs["messages"]
+        assert isinstance(transformed[0]["content"], str)
+        assert "A cat sitting on a chair." in transformed[0]["content"]
+        assert "Can you see this now?" in transformed[0]["content"]
+        assert "vision_analyze with image_url: https://example.com/cat.png" in transformed[0]["content"]
+
+    def test_build_api_kwargs_reuses_cached_image_analysis_for_duplicate_images(self, agent):
+        agent.api_mode = "anthropic_messages"
+        agent.reasoning_config = None
+        data_url = "data:image/png;base64,QUFBQQ=="
+
+        api_messages = [
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": "first"},
+                    {"type": "input_image", "image_url": data_url},
+                ],
+            },
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": "second"},
+                    {"type": "input_image", "image_url": data_url},
+                ],
+            },
+        ]
+
+        mock_vision = AsyncMock(return_value=json.dumps({"success": True, "analysis": "A small test image."}))
+        with (
+            patch("tools.vision_tools.vision_analyze_tool", new=mock_vision),
+            patch("agent.anthropic_adapter.build_anthropic_kwargs") as mock_build,
+        ):
+            mock_build.return_value = {"model": "claude-sonnet-4-20250514", "messages": [], "max_tokens": 4096}
+            agent._build_api_kwargs(api_messages)
+
+        assert mock_vision.await_count == 1
+
+
 class TestFallbackAnthropicProvider:
     """Bug fix: _try_activate_fallback had no case for anthropic provider."""
@@ -3,7 +3,7 @@
 import unittest
 from unittest.mock import patch
 
-from tools.skills_hub import ClawHubSource
+from tools.skills_hub import ClawHubSource, SkillMeta
 
 
 class _MockResponse:
@@ -22,21 +22,31 @@ class TestClawHubSource(unittest.TestCase):
 
     @patch("tools.skills_hub._write_index_cache")
     @patch("tools.skills_hub._read_index_cache", return_value=None)
+    @patch.object(ClawHubSource, "_load_catalog_index", return_value=[])
     @patch("tools.skills_hub.httpx.get")
-    def test_search_uses_new_endpoint_and_parses_items(self, mock_get, _mock_read_cache, _mock_write_cache):
-        mock_get.return_value = _MockResponse(
-            status_code=200,
-            json_data={
-                "items": [
-                    {
-                        "slug": "caldav-calendar",
-                        "displayName": "CalDAV Calendar",
-                        "summary": "Calendar integration",
-                        "tags": ["calendar", "productivity"],
-                    }
-                ]
-            },
-        )
+    def test_search_uses_listing_endpoint_as_fallback(
+        self, mock_get, _mock_load_catalog, _mock_read_cache, _mock_write_cache
+    ):
+        def side_effect(url, *args, **kwargs):
+            if url.endswith("/skills"):
+                return _MockResponse(
+                    status_code=200,
+                    json_data={
+                        "items": [
+                            {
+                                "slug": "caldav-calendar",
+                                "displayName": "CalDAV Calendar",
+                                "summary": "Calendar integration",
+                                "tags": ["calendar", "productivity"],
+                            }
+                        ]
+                    },
+                )
+            if url.endswith("/skills/caldav"):
+                return _MockResponse(status_code=404, json_data={})
+            return _MockResponse(status_code=404, json_data={})
+
+        mock_get.side_effect = side_effect
 
         results = self.src.search("caldav", limit=5)
@@ -45,11 +55,112 @@ class TestClawHubSource(unittest.TestCase):
         self.assertEqual(results[0].name, "CalDAV Calendar")
         self.assertEqual(results[0].description, "Calendar integration")
 
-        mock_get.assert_called_once()
-        args, kwargs = mock_get.call_args
+        self.assertGreaterEqual(mock_get.call_count, 2)
+        args, kwargs = mock_get.call_args_list[0]
         self.assertTrue(args[0].endswith("/skills"))
         self.assertEqual(kwargs["params"], {"search": "caldav", "limit": 5})
 
+    @patch("tools.skills_hub._write_index_cache")
+    @patch("tools.skills_hub._read_index_cache", return_value=None)
+    @patch.object(
+        ClawHubSource,
+        "_load_catalog_index",
+        return_value=[],
+    )
+    @patch("tools.skills_hub.httpx.get")
+    def test_search_falls_back_to_exact_slug_when_search_results_are_irrelevant(
+        self, mock_get, _mock_load_catalog, _mock_read_cache, _mock_write_cache
+    ):
+        def side_effect(url, *args, **kwargs):
+            if url.endswith("/skills"):
+                return _MockResponse(
+                    status_code=200,
+                    json_data={
+                        "items": [
+                            {
+                                "slug": "apple-music-dj",
+                                "displayName": "Apple Music DJ",
+                                "summary": "Unrelated result",
+                            }
+                        ]
+                    },
+                )
+            if url.endswith("/skills/self-improving-agent"):
+                return _MockResponse(
+                    status_code=200,
+                    json_data={
+                        "skill": {
+                            "slug": "self-improving-agent",
+                            "displayName": "self-improving-agent",
+                            "summary": "Captures learnings and errors for continuous improvement.",
+                            "tags": {"latest": "3.0.2", "automation": "3.0.2"},
+                        },
+                        "latestVersion": {"version": "3.0.2"},
+                    },
+                )
+            return _MockResponse(status_code=404, json_data={})
+
+        mock_get.side_effect = side_effect
+
+        results = self.src.search("self-improving-agent", limit=5)
+
+        self.assertEqual(len(results), 1)
+        self.assertEqual(results[0].identifier, "self-improving-agent")
+        self.assertEqual(results[0].name, "self-improving-agent")
+        self.assertIn("continuous improvement", results[0].description)
+
+    @patch("tools.skills_hub.httpx.get")
+    def test_search_repairs_poisoned_cache_with_exact_slug_lookup(self, mock_get):
+        mock_get.return_value = _MockResponse(
+            status_code=200,
+            json_data={
+                "skill": {
+                    "slug": "self-improving-agent",
+                    "displayName": "self-improving-agent",
+                    "summary": "Captures learnings and errors for continuous improvement.",
+                    "tags": {"latest": "3.0.2", "automation": "3.0.2"},
+                },
+                "latestVersion": {"version": "3.0.2"},
+            },
+        )
+
+        poisoned = [
+            SkillMeta(
+                name="Apple Music DJ",
+                description="Unrelated cached result",
+                source="clawhub",
+                identifier="apple-music-dj",
+                trust_level="community",
+                tags=[],
+            )
+        ]
+        results = self.src._finalize_search_results("self-improving-agent", poisoned, 5)
+
+        self.assertEqual(len(results), 1)
+        self.assertEqual(results[0].identifier, "self-improving-agent")
+        mock_get.assert_called_once()
+        self.assertTrue(mock_get.call_args.args[0].endswith("/skills/self-improving-agent"))
+
+    @patch.object(
+        ClawHubSource,
+        "_exact_slug_meta",
+        return_value=SkillMeta(
+            name="self-improving-agent",
+            description="Captures learnings and errors for continuous improvement.",
+            source="clawhub",
+            identifier="self-improving-agent",
+            trust_level="community",
+            tags=["automation"],
+        ),
+    )
+    def test_search_matches_space_separated_query_to_hyphenated_slug(
+        self, _mock_exact_slug
+    ):
+        results = self.src.search("self improving", limit=5)
+
+        self.assertEqual(len(results), 1)
+        self.assertEqual(results[0].identifier, "self-improving-agent")
+
     @patch("tools.skills_hub.httpx.get")
     def test_inspect_maps_display_name_and_summary(self, mock_get):
         mock_get.return_value = _MockResponse(
@@ -69,6 +180,29 @@ class TestClawHubSource(unittest.TestCase):
         self.assertEqual(meta.description, "Calendar integration")
         self.assertEqual(meta.identifier, "caldav-calendar")
 
+    @patch("tools.skills_hub.httpx.get")
+    def test_inspect_handles_nested_skill_payload(self, mock_get):
+        mock_get.return_value = _MockResponse(
+            status_code=200,
+            json_data={
+                "skill": {
+                    "slug": "self-improving-agent",
+                    "displayName": "self-improving-agent",
+                    "summary": "Captures learnings and errors for continuous improvement.",
+                    "tags": {"latest": "3.0.2", "automation": "3.0.2"},
+                },
+                "latestVersion": {"version": "3.0.2"},
+            },
+        )
+
+        meta = self.src.inspect("self-improving-agent")
+
+        self.assertIsNotNone(meta)
+        self.assertEqual(meta.name, "self-improving-agent")
+        self.assertIn("continuous improvement", meta.description)
+        self.assertEqual(meta.identifier, "self-improving-agent")
+        self.assertEqual(meta.tags, ["automation"])
+
     @patch("tools.skills_hub.httpx.get")
     def test_fetch_resolves_latest_version_and_downloads_raw_files(self, mock_get):
         def side_effect(url, *args, **kwargs):
@@ -1156,11 +1156,176 @@ class ClawHubSource(SkillSource):
     def trust_level_for(self, identifier: str) -> str:
         return "community"
 
+    @staticmethod
+    def _normalize_tags(tags: Any) -> List[str]:
+        if isinstance(tags, list):
+            return [str(t) for t in tags]
+        if isinstance(tags, dict):
+            return [str(k) for k in tags.keys() if str(k) != "latest"]
+        return []
+
+    @staticmethod
+    def _coerce_skill_payload(data: Any) -> Optional[Dict[str, Any]]:
+        if not isinstance(data, dict):
+            return None
+        nested = data.get("skill")
+        if isinstance(nested, dict):
+            merged = dict(nested)
+            latest_version = data.get("latestVersion")
+            if latest_version is not None and "latestVersion" not in merged:
+                merged["latestVersion"] = latest_version
+            return merged
+        return data
+
+    @staticmethod
+    def _query_terms(query: str) -> List[str]:
+        return [term for term in re.split(r"[^a-z0-9]+", query.lower()) if term]
+
+    @classmethod
+    def _search_score(cls, query: str, meta: SkillMeta) -> int:
+        query_norm = query.strip().lower()
+        if not query_norm:
+            return 1
+
+        identifier = (meta.identifier or "").lower()
+        name = (meta.name or "").lower()
+        description = (meta.description or "").lower()
+        normalized_identifier = " ".join(cls._query_terms(identifier))
+        normalized_name = " ".join(cls._query_terms(name))
+        query_terms = cls._query_terms(query_norm)
+        identifier_terms = cls._query_terms(identifier)
+        name_terms = cls._query_terms(name)
+        score = 0
+
+        if query_norm == identifier:
+            score += 140
+        if query_norm == name:
+            score += 130
+        if normalized_identifier == query_norm:
+            score += 125
+        if normalized_name == query_norm:
+            score += 120
+        if normalized_identifier.startswith(query_norm):
+            score += 95
+        if normalized_name.startswith(query_norm):
+            score += 90
+        if query_terms and identifier_terms[: len(query_terms)] == query_terms:
+            score += 70
+        if query_terms and name_terms[: len(query_terms)] == query_terms:
+            score += 65
+        if query_norm in identifier:
+            score += 40
+        if query_norm in name:
+            score += 35
+        if query_norm in description:
+            score += 10
+
+        for term in query_terms:
+            if term in identifier_terms:
+                score += 15
+            if term in name_terms:
+                score += 12
+            if term in description:
+                score += 3
+
+        return score
+
+    @staticmethod
+    def _dedupe_results(results: List[SkillMeta]) -> List[SkillMeta]:
+        seen: set[str] = set()
+        deduped: List[SkillMeta] = []
+        for result in results:
+            key = (result.identifier or result.name).lower()
+            if key in seen:
+                continue
+            seen.add(key)
+            deduped.append(result)
+        return deduped
+
+    def _exact_slug_meta(self, query: str) -> Optional[SkillMeta]:
+        slug = query.strip().split("/")[-1]
+        query_terms = self._query_terms(query)
+        candidates: List[str] = []
+
+        if slug and re.fullmatch(r"[A-Za-z0-9][A-Za-z0-9._-]*", slug):
+            candidates.append(slug)
+
+        if query_terms:
+            base_slug = "-".join(query_terms)
+            if len(query_terms) >= 2:
+                candidates.extend([
+                    f"{base_slug}-agent",
+                    f"{base_slug}-skill",
+                    f"{base_slug}-tool",
+                    f"{base_slug}-assistant",
+                    f"{base_slug}-playbook",
+                    base_slug,
+                ])
+            else:
+                candidates.append(base_slug)
+
+        seen: set[str] = set()
+        for candidate in candidates:
+            if candidate in seen:
+                continue
+            seen.add(candidate)
+            meta = self.inspect(candidate)
+            if meta:
+                return meta
+
+        return None
+
+    def _finalize_search_results(self, query: str, results: List[SkillMeta], limit: int) -> List[SkillMeta]:
+        query_norm = query.strip()
+        if not query_norm:
+            return self._dedupe_results(results)[:limit]
+
+        filtered = [meta for meta in results if self._search_score(query_norm, meta) > 0]
+        filtered.sort(
+            key=lambda meta: (
+                -self._search_score(query_norm, meta),
+                meta.name.lower(),
+                meta.identifier.lower(),
+            )
+        )
+        filtered = self._dedupe_results(filtered)
+
+        exact = self._exact_slug_meta(query_norm)
+        if exact:
+            filtered = [meta for meta in filtered if self._search_score(query_norm, meta) >= 20]
+            filtered = self._dedupe_results([exact] + filtered)
+
+        if filtered:
+            return filtered[:limit]
+
+        if re.fullmatch(r"[A-Za-z0-9][A-Za-z0-9._/-]*", query_norm):
+            return []
+
+        return self._dedupe_results(results)[:limit]
+
     def search(self, query: str, limit: int = 10) -> List[SkillMeta]:
-        cache_key = f"clawhub_search_{hashlib.md5(query.encode()).hexdigest()}"
+        query = query.strip()
+
+        if query:
+            query_terms = self._query_terms(query)
+            if len(query_terms) >= 2:
+                direct = self._exact_slug_meta(query)
+                if direct:
+                    return [direct]
+
+            results = self._search_catalog(query, limit=limit)
+            if results:
+                return results
+
+        # Empty query or catalog fallback failure: use the lightweight listing API.
+        cache_key = f"clawhub_search_listing_v1_{hashlib.md5(query.encode()).hexdigest()}_{limit}"
         cached = _read_index_cache(cache_key)
         if cached is not None:
-            return [SkillMeta(**s) for s in cached][:limit]
+            return self._finalize_search_results(
+                query,
+                [SkillMeta(**s) for s in cached],
+                limit,
+            )
 
         try:
             resp = httpx.get(
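To make the ranking heuristic concrete, here is a hand-worked score for the regression the tests above exercise (weights taken from _search_score; the metadata mirrors the nested-payload fixture):

    meta = SkillMeta(
        name="self-improving-agent",
        description="Captures learnings and errors for continuous improvement.",
        source="clawhub",
        identifier="self-improving-agent",
        trust_level="community",
        tags=["automation"],
    )

    # Query "self-improving-agent": exact identifier (140) + exact name (130)
    # + term-prefix matches (70 + 65) + substring matches (40 + 35) + per-term
    # hits (3 * 15 + 3 * 12) = 561. The hyphenated query never equals the
    # space-normalized forms, so the 125/120/95/90 branches do not fire.
    ClawHubSource._search_score("self-improving-agent", meta)  # 561

    # An unrelated item like "apple-music-dj" shares no terms with this query,
    # scores 0, and is filtered out before the exact-slug lookup repairs the list.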
@@ -1185,20 +1350,19 @@ class ClawHubSource(SkillSource):
                 continue
             display_name = item.get("displayName") or item.get("name") or slug
             summary = item.get("summary") or item.get("description") or ""
-            tags = item.get("tags", [])
-            if not isinstance(tags, list):
-                tags = []
+            tags = self._normalize_tags(item.get("tags", []))
             results.append(SkillMeta(
                 name=display_name,
                 description=summary,
                 source="clawhub",
                 identifier=slug,
                 trust_level="community",
-                tags=[str(t) for t in tags],
+                tags=tags,
             ))
 
-        _write_index_cache(cache_key, [_skill_meta_to_dict(s) for s in results])
-        return results
+        final_results = self._finalize_search_results(query, results, limit)
+        _write_index_cache(cache_key, [_skill_meta_to_dict(s) for s in final_results])
+        return final_results
 
     def fetch(self, identifier: str) -> Optional[SkillBundle]:
         slug = identifier.split("/")[-1]
@@ -1244,13 +1408,11 @@ class ClawHubSource(SkillSource):
 
     def inspect(self, identifier: str) -> Optional[SkillMeta]:
         slug = identifier.split("/")[-1]
-        data = self._get_json(f"{self.BASE_URL}/skills/{slug}")
+        data = self._coerce_skill_payload(self._get_json(f"{self.BASE_URL}/skills/{slug}"))
         if not isinstance(data, dict):
             return None
 
-        tags = data.get("tags", [])
-        if not isinstance(tags, list):
-            tags = []
+        tags = self._normalize_tags(data.get("tags", []))
 
         return SkillMeta(
             name=data.get("displayName") or data.get("name") or data.get("slug") or slug,
@@ -1258,9 +1420,75 @@ class ClawHubSource(SkillSource):
             source="clawhub",
             identifier=data.get("slug") or slug,
             trust_level="community",
-            tags=[str(t) for t in tags],
+            tags=tags,
         )
 
+    def _search_catalog(self, query: str, limit: int = 10) -> List[SkillMeta]:
+        cache_key = f"clawhub_search_catalog_v1_{hashlib.md5(f'{query}|{limit}'.encode()).hexdigest()}"
+        cached = _read_index_cache(cache_key)
+        if cached is not None:
+            return [SkillMeta(**s) for s in cached][:limit]
+
+        catalog = self._load_catalog_index()
+        if not catalog:
+            return []
+
+        results = self._finalize_search_results(query, catalog, limit)
+        _write_index_cache(cache_key, [_skill_meta_to_dict(s) for s in results])
+        return results
+
+    def _load_catalog_index(self) -> List[SkillMeta]:
+        cache_key = "clawhub_catalog_v1"
+        cached = _read_index_cache(cache_key)
+        if cached is not None:
+            return [SkillMeta(**s) for s in cached]
+
+        cursor: Optional[str] = None
+        results: List[SkillMeta] = []
+        seen: set[str] = set()
+        max_pages = 50
+
+        for _ in range(max_pages):
+            params: Dict[str, Any] = {"limit": 200}
+            if cursor:
+                params["cursor"] = cursor
+
+            try:
+                resp = httpx.get(f"{self.BASE_URL}/skills", params=params, timeout=30)
+                if resp.status_code != 200:
+                    break
+                data = resp.json()
+            except (httpx.HTTPError, json.JSONDecodeError):
+                break
+
+            items = data.get("items", []) if isinstance(data, dict) else []
+            if not isinstance(items, list) or not items:
+                break
+
+            for item in items:
+                slug = item.get("slug")
+                if not isinstance(slug, str) or not slug or slug in seen:
+                    continue
+                seen.add(slug)
+                display_name = item.get("displayName") or item.get("name") or slug
+                summary = item.get("summary") or item.get("description") or ""
+                tags = self._normalize_tags(item.get("tags", []))
+                results.append(SkillMeta(
+                    name=display_name,
+                    description=summary,
+                    source="clawhub",
+                    identifier=slug,
+                    trust_level="community",
+                    tags=tags,
+                ))
+
+            cursor = data.get("nextCursor") if isinstance(data, dict) else None
+            if not isinstance(cursor, str) or not cursor:
+                break
+
+        _write_index_cache(cache_key, [_skill_meta_to_dict(s) for s in results])
+        return results
+
     def _get_json(self, url: str, timeout: int = 20) -> Optional[Any]:
         try:
             resp = httpx.get(url, timeout=timeout)
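The registry sometimes returns tags as a version map rather than a list; _normalize_tags flattens both shapes. Worked examples, derived from the code above:

    ClawHubSource._normalize_tags(["calendar", "productivity"])
    # -> ["calendar", "productivity"]   (list payloads pass through)

    ClawHubSource._normalize_tags({"latest": "3.0.2", "automation": "3.0.2"})
    # -> ["automation"]                 (dict payloads keep keys, minus "latest")

    ClawHubSource._normalize_tags("oops")
    # -> []                             (anything else is dropped)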
||||
Reference in New Issue
Block a user