diff --git a/hermes_cli/main.py b/hermes_cli/main.py
index 61b1d38a61b..714ad82bf29 100644
--- a/hermes_cli/main.py
+++ b/hermes_cli/main.py
@@ -3340,8 +3340,9 @@ def _model_flow_kimi(config, current_model=""):
 
     # Step 3: Model selection — show appropriate models for the endpoint
     if is_coding_plan:
-        # Coding Plan models (kimi-k2.5 first)
+        # Coding Plan models (kimi-k2.6 first)
         model_list = [
+            "kimi-k2.6",
             "kimi-k2.5",
             "kimi-for-coding",
             "kimi-k2-thinking",
diff --git a/hermes_cli/models.py b/hermes_cli/models.py
index 88e7c4f5b5a..eb5fe678cc0 100644
--- a/hermes_cli/models.py
+++ b/hermes_cli/models.py
@@ -171,6 +171,7 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
         "openai/gpt-oss-120b",
     ],
     "kimi-coding": [
+        "kimi-k2.6",
         "kimi-k2.5",
         "kimi-for-coding",
         "kimi-k2-thinking",
@@ -179,12 +180,14 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
         "kimi-k2-0905-preview",
     ],
     "kimi-coding-cn": [
+        "kimi-k2.6",
         "kimi-k2.5",
         "kimi-k2-thinking",
         "kimi-k2-turbo-preview",
         "kimi-k2-0905-preview",
     ],
     "moonshot": [
+        "kimi-k2.6",
         "kimi-k2.5",
         "kimi-k2-thinking",
         "kimi-k2-turbo-preview",
diff --git a/hermes_cli/setup.py b/hermes_cli/setup.py
index 6ce9f6dfabc..1fcd88dff0e 100644
--- a/hermes_cli/setup.py
+++ b/hermes_cli/setup.py
@@ -93,8 +93,8 @@ _DEFAULT_PROVIDER_MODELS = {
         "gemini-3-flash-preview", "gemini-3.1-flash-lite-preview",
     ],
     "zai": ["glm-5.1", "glm-5", "glm-4.7", "glm-4.5", "glm-4.5-flash"],
-    "kimi-coding": ["kimi-k2.5", "kimi-k2-thinking", "kimi-k2-turbo-preview"],
-    "kimi-coding-cn": ["kimi-k2.5", "kimi-k2-thinking", "kimi-k2-turbo-preview"],
+    "kimi-coding": ["kimi-k2.6", "kimi-k2.5", "kimi-k2-thinking", "kimi-k2-turbo-preview"],
+    "kimi-coding-cn": ["kimi-k2.6", "kimi-k2.5", "kimi-k2-thinking", "kimi-k2-turbo-preview"],
     "arcee": ["trinity-large-thinking", "trinity-large-preview", "trinity-mini"],
     "minimax": ["MiniMax-M2.7", "MiniMax-M2.5", "MiniMax-M2.1", "MiniMax-M2"],
     "minimax-cn": ["MiniMax-M2.7", "MiniMax-M2.5", "MiniMax-M2.1", "MiniMax-M2"],