fix: add kimi-coding-cn to remaining provider touchpoints

Follow-up for salvaged PR #7637. Adds kimi-coding-cn to:
- model_normalize.py (prefix strip)
- providers.py (models.dev mapping)
- runtime_provider.py (credential resolution)
- setup.py (model list + setup label)
- doctor.py (health check)
- trajectory_compressor.py (URL detection)
- models_dev.py (registry mapping)
- integrations/providers.md (docs)
This commit is contained in:
Teknium
2026-04-13 11:16:09 -07:00
committed by Teknium
parent 2b3aa36242
commit 0e60a9dc25
8 changed files with 17 additions and 6 deletions

View File

@@ -148,6 +148,7 @@ PROVIDER_TO_MODELS_DEV: Dict[str, str] = {
    "openai-codex": "openai",
    "zai": "zai",
    "kimi-coding": "kimi-for-coding",
    "kimi-coding-cn": "kimi-for-coding",
    "minimax": "minimax",
    "minimax-cn": "minimax-cn",
    "deepseek": "deepseek",

View File

@@ -721,6 +721,7 @@ def run_doctor(args):
    _apikey_providers = [
        ("Z.AI / GLM", ("GLM_API_KEY", "ZAI_API_KEY", "Z_AI_API_KEY"), "https://api.z.ai/api/paas/v4/models", "GLM_BASE_URL", True),
        ("Kimi / Moonshot", ("KIMI_API_KEY",), "https://api.moonshot.ai/v1/models", "KIMI_BASE_URL", True),
        ("Kimi / Moonshot (China)", ("KIMI_CN_API_KEY",), "https://api.moonshot.cn/v1/models", None, True),
        ("DeepSeek", ("DEEPSEEK_API_KEY",), "https://api.deepseek.com/v1/models", "DEEPSEEK_BASE_URL", True),
        ("Hugging Face", ("HF_TOKEN",), "https://router.huggingface.co/v1/models", "HF_BASE_URL", True),
        ("Alibaba/DashScope", ("DASHSCOPE_API_KEY",), "https://dashscope-intl.aliyuncs.com/compatible-mode/v1/models", "DASHSCOPE_BASE_URL", True),

View File

@@ -88,6 +88,7 @@ _AUTHORITATIVE_NATIVE_PROVIDERS: frozenset[str] = frozenset({
_MATCHING_PREFIX_STRIP_PROVIDERS: frozenset[str] = frozenset({
    "zai",
    "kimi-coding",
    "kimi-coding-cn",
    "minimax",
    "minimax-cn",
    "alibaba",

View File

@@ -179,6 +179,7 @@ ALIASES: Dict[str, str] = {
    # kimi-for-coding (models.dev ID)
    "kimi": "kimi-for-coding",
    "kimi-coding": "kimi-for-coding",
    "kimi-coding-cn": "kimi-for-coding",
    "moonshot": "kimi-for-coding",
    # minimax-cn

View File

@@ -608,7 +608,7 @@ def _resolve_explicit_runtime(
    base_url = explicit_base_url
    if not base_url:
        if provider in ("kimi-coding", "kimi-coding-cn"):
            creds = resolve_api_key_provider_credentials(provider)
            base_url = creds.get("base_url", "").rstrip("/")
        else:

View File

@@ -106,6 +106,7 @@ _DEFAULT_PROVIDER_MODELS = {
    ],
    "zai": ["glm-5.1", "glm-5", "glm-4.7", "glm-4.5", "glm-4.5-flash"],
    "kimi-coding": ["kimi-k2.5", "kimi-k2-thinking", "kimi-k2-turbo-preview"],
    "kimi-coding-cn": ["kimi-k2.5", "kimi-k2-thinking", "kimi-k2-turbo-preview"],
    "minimax": ["MiniMax-M2.7", "MiniMax-M2.5", "MiniMax-M2.1", "MiniMax-M2"],
    "minimax-cn": ["MiniMax-M2.7", "MiniMax-M2.5", "MiniMax-M2.1", "MiniMax-M2"],
    "ai-gateway": ["anthropic/claude-opus-4.6", "anthropic/claude-sonnet-4.6", "openai/gpt-5", "google/gemini-3-flash"],
@@ -815,6 +816,7 @@ def setup_model_provider(config: dict, *, quick: bool = False):
        "copilot-acp": "GitHub Copilot ACP",
        "zai": "Z.AI / GLM",
        "kimi-coding": "Kimi / Moonshot",
        "kimi-coding-cn": "Kimi / Moonshot (China)",
        "minimax": "MiniMax",
        "minimax-cn": "MiniMax CN",
        "anthropic": "Anthropic",

View File

@@ -415,7 +415,7 @@ class TrajectoryCompressor:
            return "codex"
        if "api.z.ai" in url:
            return "zai"
        if "moonshot.ai" in url or "moonshot.cn" in url or "api.kimi.com" in url:
            return "kimi-coding"
        if "minimaxi.com" in url:
            return "minimax-cn"

View File

@@ -23,6 +23,7 @@ You need at least one way to connect to an LLM. Use `hermes model` to switch pro
| **AI Gateway** | `AI_GATEWAY_API_KEY` in `~/.hermes/.env` (provider: `ai-gateway`) |
| **z.ai / GLM** | `GLM_API_KEY` in `~/.hermes/.env` (provider: `zai`) |
| **Kimi / Moonshot** | `KIMI_API_KEY` in `~/.hermes/.env` (provider: `kimi-coding`) |
| **Kimi / Moonshot (China)** | `KIMI_CN_API_KEY` in `~/.hermes/.env` (provider: `kimi-coding-cn`; aliases: `kimi-cn`, `moonshot-cn`) |
| **MiniMax** | `MINIMAX_API_KEY` in `~/.hermes/.env` (provider: `minimax`) |
| **MiniMax China** | `MINIMAX_CN_API_KEY` in `~/.hermes/.env` (provider: `minimax-cn`) |
| **Alibaba Cloud** | `DASHSCOPE_API_KEY` in `~/.hermes/.env` (provider: `alibaba`, aliases: `dashscope`, `qwen`) |
@@ -143,10 +144,14 @@ These providers have built-in support with dedicated provider IDs. Set the API k
hermes chat --provider zai --model glm-5
# Requires: GLM_API_KEY in ~/.hermes/.env

# Kimi / Moonshot AI (international: api.moonshot.ai)
hermes chat --provider kimi-coding --model kimi-for-coding
# Requires: KIMI_API_KEY in ~/.hermes/.env

# Kimi / Moonshot AI (China: api.moonshot.cn)
hermes chat --provider kimi-coding-cn --model kimi-k2.5
# Requires: KIMI_CN_API_KEY in ~/.hermes/.env

# MiniMax (global endpoint)
hermes chat --provider minimax --model MiniMax-M2.7
# Requires: MINIMAX_API_KEY in ~/.hermes/.env
@@ -167,7 +172,7 @@ hermes chat --provider xiaomi --model mimo-v2-pro
Or set the provider permanently in `config.yaml`:
```yaml
model:
  provider: "zai"  # or: kimi-coding, kimi-coding-cn, minimax, minimax-cn, alibaba, xiaomi
  default: "glm-5"
```
@@ -854,7 +859,7 @@ You can also select named custom providers from the interactive `hermes model` m
| **Cost optimization** | ClawRouter or OpenRouter with `sort: "price"` |
| **Maximum privacy** | Ollama, vLLM, or llama.cpp (fully local) |
| **Enterprise / Azure** | Azure OpenAI with custom endpoint |
| **Chinese AI models** | z.ai (GLM), Kimi/Moonshot (`kimi-coding` or `kimi-coding-cn`), MiniMax, or Xiaomi MiMo (first-class providers) |

:::tip
You can switch between providers at any time with `hermes model` — no restart required. Your conversation history, memory, and skills carry over regardless of which provider you use.
@@ -929,7 +934,7 @@ fallback_model:
When activated, the fallback swaps the model and provider mid-session without losing your conversation. It fires **at most once** per session.

Supported providers: `openrouter`, `nous`, `openai-codex`, `copilot`, `copilot-acp`, `anthropic`, `huggingface`, `zai`, `kimi-coding`, `kimi-coding-cn`, `minimax`, `minimax-cn`, `deepseek`, `ai-gateway`, `opencode-zen`, `opencode-go`, `kilocode`, `xiaomi`, `alibaba`, `custom`.

:::tip
Fallback is configured exclusively through `config.yaml` — there are no environment variables for it. For full details on when it triggers, supported providers, and how it interacts with auxiliary tasks and delegation, see [Fallback Providers](/docs/user-guide/features/fallback-providers).