Compare commits

...

2 Commits

Author SHA1 Message Date
Teknium
e8bf301e6f fix: update Gemini model catalog + wire models.dev as live model source
Follow-up for salvaged PR #5494:
- Update model catalog to Gemini 3.x + Gemma 4 (drop deprecated 2.0)
- Add list_agentic_models() to models_dev.py with noise filter
- Wire models.dev into _model_flow_api_key_provider as primary source
  (static curated list serves as offline fallback)
- Add gemini -> google mapping in PROVIDER_TO_MODELS_DEV
- Fix Gemma 4 context lengths to 256K (models.dev values)
- Update auxiliary model to gemini-3-flash-preview
- Expand tests: 3.x catalog, context lengths, models.dev integration
2026-04-06 10:19:19 -07:00
Teknium
4507567704 feat(providers): add Google AI Studio (Gemini) as a first-class provider
Cherry-picked from PR #5494 by kshitijk4poor.
Adds native Gemini support via Google's OpenAI-compatible endpoint.
Zero new dependencies.
2026-04-06 10:14:01 -07:00
12 changed files with 414 additions and 20 deletions

View File

@@ -14,6 +14,16 @@
# LLM_MODEL is no longer read from .env — this line is kept for reference only.
# LLM_MODEL=anthropic/claude-opus-4.6
# =============================================================================
# LLM PROVIDER (Google AI Studio / Gemini)
# =============================================================================
# Native Gemini API via Google's OpenAI-compatible endpoint.
# Get your key at: https://aistudio.google.com/app/apikey
# GOOGLE_API_KEY=your_google_ai_studio_key_here
# GEMINI_API_KEY=your_gemini_key_here # alias for GOOGLE_API_KEY
# Optional base URL override (default: Google's OpenAI-compatible endpoint)
# GEMINI_BASE_URL=https://generativelanguage.googleapis.com/v1beta/openai
# =============================================================================
# LLM PROVIDER (z.ai / GLM)
# =============================================================================
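Note: the comments above describe a two-variable scheme with an optional endpoint override. A minimal sketch of the resolution order they imply, assuming only the documented precedence (GOOGLE_API_KEY first, GEMINI_API_KEY as alias); _gemini_credentials is a hypothetical helper, not code from this PR:

import os

# Default matches DEFAULT_GEMINI_BASE_URL added later in this compare.
DEFAULT_GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai"

def _gemini_credentials() -> tuple[str, str]:
    # GOOGLE_API_KEY wins over its GEMINI_API_KEY alias, per the
    # credential-priority test at the bottom of this compare.
    api_key = os.environ.get("GOOGLE_API_KEY") or os.environ.get("GEMINI_API_KEY") or ""
    base_url = os.environ.get("GEMINI_BASE_URL") or DEFAULT_GEMINI_BASE_URL
    return api_key, base_url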

View File

@@ -55,6 +55,7 @@ logger = logging.getLogger(__name__)
# Default auxiliary models for direct API-key providers (cheap/fast for side tasks)
_API_KEY_PROVIDER_AUX_MODELS: Dict[str, str] = {
    "gemini": "gemini-3-flash-preview",
    "zai": "glm-4.5-flash",
    "kimi-coding": "kimi-k2-turbo-preview",
    "minimax": "MiniMax-M2.7-highspeed",

View File

@@ -24,10 +24,11 @@ logger = logging.getLogger(__name__)
# are preserved so the full model name reaches cache lookups and server queries.
_PROVIDER_PREFIXES: frozenset[str] = frozenset({
    "openrouter", "nous", "openai-codex", "copilot", "copilot-acp",
-    "zai", "kimi-coding", "minimax", "minimax-cn", "anthropic", "deepseek",
+    "gemini", "zai", "kimi-coding", "minimax", "minimax-cn", "anthropic", "deepseek",
    "opencode-zen", "opencode-go", "ai-gateway", "kilocode", "alibaba",
    "custom", "local",
    # Common aliases
+    "google", "google-gemini", "google-ai-studio",
    "glm", "z-ai", "z.ai", "zhipu", "github", "github-copilot",
    "github-models", "kimi", "moonshot", "claude", "deep-seek",
    "opencode", "zen", "go", "vercel", "kilo", "dashscope", "aliyun", "qwen",
@@ -101,6 +102,11 @@ DEFAULT_CONTEXT_LENGTHS = {
"gpt-4": 128000,
# Google
"gemini": 1048576,
# Gemma (open models served via AI Studio)
"gemma-4-31b": 256000,
"gemma-4-26b": 256000,
"gemma-3": 131072,
"gemma": 8192, # fallback for older gemma models
# DeepSeek
"deepseek": 128000,
# Meta
@@ -175,7 +181,7 @@ _URL_TO_PROVIDER: Dict[str, str] = {
"dashscope.aliyuncs.com": "alibaba",
"dashscope-intl.aliyuncs.com": "alibaba",
"openrouter.ai": "openrouter",
"generativelanguage.googleapis.com": "google",
"generativelanguage.googleapis.com": "gemini",
"inference-api.nousresearch.com": "nous",
"api.deepseek.com": "deepseek",
"api.githubcopilot.com": "copilot",
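Note: the Gemma keys added above are prefixes rather than full model IDs ("gemma-4-31b" must cover "gemma-4-31b-it" for the context-length tests at the end of this compare to hold), which implies a longest-prefix lookup roughly like this sketch (assumed behavior of get_model_context_length, not its actual body; the default value here is hypothetical):

def lookup_context_length(model: str, default: int = 8192) -> int:
    # Longest matching prefix wins, so "gemma-4-31b-it" hits "gemma-4-31b"
    # (256000) before the generic "gemma" fallback (8192).
    best = ""
    for prefix in DEFAULT_CONTEXT_LENGTHS:
        if model.startswith(prefix) and len(prefix) > len(best):
            best = prefix
    return DEFAULT_CONTEXT_LENGTHS[best] if best else default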

View File

@@ -160,6 +160,7 @@ PROVIDER_TO_MODELS_DEV: Dict[str, str] = {
"kilocode": "kilo",
"fireworks": "fireworks-ai",
"huggingface": "huggingface",
"gemini": "google",
"google": "google",
"xai": "xai",
"nvidia": "nvidia",
@@ -422,6 +423,39 @@ def list_provider_models(provider: str) -> List[str]:
    return list(models.keys())


# Patterns that indicate non-agentic or noise models (TTS, embedding,
# dated preview snapshots, live/streaming-only, image-only).
import re

_NOISE_PATTERNS: re.Pattern = re.compile(
    r"-tts\b|embedding|live-|-(preview|exp)-\d{2,4}[-_]|"
    r"-image\b|-image-preview\b|-customtools\b",
    re.IGNORECASE,
)


def list_agentic_models(provider: str) -> List[str]:
    """Return model IDs suitable for agentic use from models.dev.

    Filters for tool_call=True and excludes noise (TTS, embedding,
    dated preview snapshots, live/streaming, image-only models).
    Returns an empty list on any failure.
    """
    models = _get_provider_models(provider)
    if models is None:
        return []
    result = []
    for mid, entry in models.items():
        if not isinstance(entry, dict):
            continue
        if not entry.get("tool_call", False):
            continue
        if _NOISE_PATTERNS.search(mid):
            continue
        result.append(mid)
    return result


def search_models_dev(
    query: str, provider: str = None, limit: int = 5
) -> List[Dict[str, Any]]:
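Note: usage of the new helper and filter, matching the behavior pinned by the tests at the end of this compare:

from agent.models_dev import list_agentic_models, _NOISE_PATTERNS

assert _NOISE_PATTERNS.search("gemini-2.5-flash-preview-04-17")  # dated preview: dropped
assert not _NOISE_PATTERNS.search("gemini-3-flash-preview")      # undated preview: kept

models = list_agentic_models("gemini")  # tool-capable IDs, or [] on any failure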

View File

@@ -18,7 +18,8 @@ model:
# "anthropic" - Direct Anthropic API (requires: ANTHROPIC_API_KEY)
# "openai-codex" - OpenAI Codex (requires: hermes login --provider openai-codex)
# "copilot" - GitHub Copilot / GitHub Models (requires: GITHUB_TOKEN)
# "zai" - z.ai / ZhipuAI GLM (requires: GLM_API_KEY)
# "gemini" - Use Google AI Studio direct (requires: GOOGLE_API_KEY or GEMINI_API_KEY)
# "zai" - Use z.ai / ZhipuAI GLM models (requires: GLM_API_KEY)
# "kimi-coding" - Kimi / Moonshot AI (requires: KIMI_API_KEY)
# "minimax" - MiniMax global (requires: MINIMAX_API_KEY)
# "minimax-cn" - MiniMax China (requires: MINIMAX_CN_API_KEY)
@@ -315,7 +316,8 @@ compression:
# "auto" - Best available: OpenRouter → Nous Portal → main endpoint (default)
# "openrouter" - Force OpenRouter (requires OPENROUTER_API_KEY)
# "nous" - Force Nous Portal (requires: hermes login)
# "codex" - Force Codex OAuth (requires: hermes model → Codex).
# "gemini" - Force Google AI Studio direct (requires: GOOGLE_API_KEY or GEMINI_API_KEY)
# "codex" - Force Codex OAuth (requires: hermes model → Codex).
# Uses gpt-5.3-codex which supports vision.
# "main" - Use your custom endpoint (OPENAI_BASE_URL + OPENAI_API_KEY).
# Works with OpenAI API, local models, or any OpenAI-compatible

View File

@@ -69,6 +69,7 @@ DEVICE_AUTH_POLL_INTERVAL_CAP_SECONDS = 1 # poll at most every 1s
DEFAULT_CODEX_BASE_URL = "https://chatgpt.com/backend-api/codex"
DEFAULT_GITHUB_MODELS_BASE_URL = "https://api.githubcopilot.com"
DEFAULT_COPILOT_ACP_BASE_URL = "acp://copilot"
DEFAULT_GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai"
CODEX_OAUTH_CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann"
CODEX_OAUTH_TOKEN_URL = "https://auth.openai.com/oauth/token"
CODEX_ACCESS_TOKEN_REFRESH_SKEW_SECONDS = 120
@@ -125,6 +126,14 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = {
        inference_base_url=DEFAULT_COPILOT_ACP_BASE_URL,
        base_url_env_var="COPILOT_ACP_BASE_URL",
    ),
    "gemini": ProviderConfig(
        id="gemini",
        name="Google AI Studio",
        auth_type="api_key",
        inference_base_url="https://generativelanguage.googleapis.com/v1beta/openai",
        api_key_env_vars=("GOOGLE_API_KEY", "GEMINI_API_KEY"),
        base_url_env_var="GEMINI_BASE_URL",
    ),
    "zai": ProviderConfig(
        id="zai",
        name="Z.AI / GLM",
@@ -758,6 +767,7 @@ def resolve_provider(
# Normalize provider aliases
_PROVIDER_ALIASES = {
"glm": "zai", "z-ai": "zai", "z.ai": "zai", "zhipu": "zai",
"google": "gemini", "google-gemini": "gemini", "google-ai-studio": "gemini",
"kimi": "kimi-coding", "moonshot": "kimi-coding",
"minimax-china": "minimax-cn", "minimax_cn": "minimax-cn",
"claude": "anthropic", "claude-code": "anthropic",

View File

@@ -590,6 +590,30 @@ OPTIONAL_ENV_VARS = {
"category": "provider",
"advanced": True,
},
"GOOGLE_API_KEY": {
"description": "Google AI Studio API key (also recognized as GEMINI_API_KEY)",
"prompt": "Google AI Studio API key",
"url": "https://aistudio.google.com/app/apikey",
"password": True,
"category": "provider",
"advanced": True,
},
"GEMINI_API_KEY": {
"description": "Google AI Studio API key (alias for GOOGLE_API_KEY)",
"prompt": "Gemini API key",
"url": "https://aistudio.google.com/app/apikey",
"password": True,
"category": "provider",
"advanced": True,
},
"GEMINI_BASE_URL": {
"description": "Google AI Studio base URL override",
"prompt": "Gemini base URL (leave empty for default)",
"url": None,
"password": False,
"category": "provider",
"advanced": True,
},
"GLM_API_KEY": {
"description": "Z.AI / GLM API key (also recognized as ZAI_API_KEY / Z_AI_API_KEY)",
"prompt": "Z.AI / GLM API key",

View File

@@ -921,6 +921,7 @@ def select_provider_and_model(args=None):
"copilot-acp": "GitHub Copilot ACP",
"copilot": "GitHub Copilot",
"anthropic": "Anthropic",
"gemini": "Google AI Studio",
"zai": "Z.AI / GLM",
"kimi-coding": "Kimi / Moonshot",
"minimax": "MiniMax",
@@ -952,6 +953,7 @@ def select_provider_and_model(args=None):
    extended_providers = [
        ("copilot-acp", "GitHub Copilot ACP (spawns `copilot --acp --stdio`)"),
        ("gemini", "Google AI Studio (Gemini models — OpenAI-compatible endpoint)"),
        ("zai", "Z.AI / GLM (Zhipu AI direct API)"),
        ("kimi-coding", "Kimi / Moonshot (Moonshot AI direct API)"),
        ("minimax", "MiniMax (global direct API)"),
@@ -1055,7 +1057,7 @@ def select_provider_and_model(args=None):
        _model_flow_anthropic(config, current_model)
    elif selected_provider == "kimi-coding":
        _model_flow_kimi(config, current_model)
-    elif selected_provider in ("zai", "minimax", "minimax-cn", "kilocode", "opencode-zen", "opencode-go", "ai-gateway", "alibaba", "huggingface"):
+    elif selected_provider in ("gemini", "zai", "minimax", "minimax-cn", "kilocode", "opencode-zen", "opencode-go", "ai-gateway", "alibaba", "huggingface"):
        _model_flow_api_key_provider(config, selected_provider, current_model)
@@ -2209,24 +2211,37 @@ def _model_flow_api_key_provider(config, provider_id, current_model=""):
        save_env_value(base_url_env, override)
        effective_base = override

-    # Model selection — try live /models endpoint first, fall back to defaults.
-    # Providers with large live catalogs (100+ models) use a curated list instead
-    # so users see familiar model names rather than an overwhelming dump.
+    # Model selection — resolution order:
+    #   1. models.dev registry (cached, filtered for agentic/tool-capable models)
+    #   2. Curated static fallback list (offline insurance)
+    #   3. Live /models endpoint probe (small providers without models.dev data)
    curated = _PROVIDER_MODELS.get(provider_id, [])
-    if curated and len(curated) >= 8:
+    # Try models.dev first — returns tool-capable models, filtered for noise
+    mdev_models: list = []
+    try:
+        from agent.models_dev import list_agentic_models
+        mdev_models = list_agentic_models(provider_id)
+    except Exception:
+        pass
+    if mdev_models:
+        model_list = mdev_models
+        print(f" Found {len(model_list)} model(s) from models.dev registry")
+    elif curated and len(curated) >= 8:
        # Curated list is substantial — use it directly, skip live probe
        live_models = None
        model_list = curated
        print(f" Showing {len(model_list)} curated models — use \"Enter custom model name\" for others.")
    else:
        api_key_for_probe = existing_key or (get_env_value(key_env) if key_env else "")
        live_models = fetch_api_models(api_key_for_probe, effective_base)
-    if live_models and len(live_models) >= len(curated):
-        model_list = live_models
-        print(f" Found {len(model_list)} model(s) from {pconfig.name} API")
-    else:
-        model_list = curated
-        if model_list:
-            print(f" Showing {len(model_list)} curated models — use \"Enter custom model name\" for others.")
+        if live_models and len(live_models) >= len(curated):
+            model_list = live_models
+            print(f" Found {len(model_list)} model(s) from {pconfig.name} API")
+        else:
+            model_list = curated
+            if model_list:
+                print(f" Showing {len(model_list)} curated models — use \"Enter custom model name\" for others.")
            # else: no defaults either, will fall through to raw input

    if provider_id in {"opencode-zen", "opencode-go"}:
@@ -4182,7 +4197,7 @@ For more help on a command:
    )
    chat_parser.add_argument(
        "--provider",
-        choices=["auto", "openrouter", "nous", "openai-codex", "copilot-acp", "copilot", "anthropic", "huggingface", "zai", "kimi-coding", "minimax", "minimax-cn", "kilocode"],
+        choices=["auto", "openrouter", "nous", "openai-codex", "copilot-acp", "copilot", "anthropic", "gemini", "huggingface", "zai", "kimi-coding", "minimax", "minimax-cn", "kilocode"],
        default=None,
        help="Inference provider (default: auto)"
    )
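Note: the restructured _model_flow_api_key_provider above boils down to a three-step fallback chain. A distilled sketch, written only to make the ordering explicit (choose_model_source is hypothetical and does not exist in the codebase):

def choose_model_source(provider_id: str, api_key: str, base_url: str) -> list[str]:
    try:
        from agent.models_dev import list_agentic_models
        if models := list_agentic_models(provider_id):
            return models                  # 1. models.dev registry (filtered)
    except Exception:
        pass
    curated = _PROVIDER_MODELS.get(provider_id, [])
    if len(curated) >= 8:
        return curated                     # 2. substantial curated list, skip probe
    live = fetch_api_models(api_key, base_url)  # 3. live /models probe
    return live if live and len(live) >= len(curated) else curated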

View File

@@ -41,6 +41,7 @@ _VENDOR_PREFIXES: dict[str, str] = {
"o3": "openai",
"o4": "openai",
"gemini": "google",
"gemma": "google",
"deepseek": "deepseek",
"glm": "z-ai",
"kimi": "moonshotai",
@@ -77,6 +78,7 @@ _STRIP_VENDOR_ONLY_PROVIDERS: frozenset[str] = frozenset({
# Providers whose own naming is authoritative -- pass through unchanged.
_PASSTHROUGH_PROVIDERS: frozenset[str] = frozenset({
"gemini",
"zai",
"kimi-coding",
"minimax",

View File

@@ -111,6 +111,17 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
"gemini-2.5-pro",
"grok-code-fast-1",
],
"gemini": [
"gemini-3.1-pro-preview",
"gemini-3-flash-preview",
"gemini-3.1-flash-lite-preview",
"gemini-2.5-pro",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
# Gemma open models (also served via AI Studio)
"gemma-4-31b-it",
"gemma-4-26b-it",
],
"zai": [
"glm-5",
"glm-5-turbo",
@@ -260,6 +271,7 @@ _PROVIDER_LABELS = {
"copilot-acp": "GitHub Copilot ACP",
"nous": "Nous Portal",
"copilot": "GitHub Copilot",
"gemini": "Google AI Studio",
"zai": "Z.AI / GLM",
"kimi-coding": "Kimi / Moonshot",
"minimax": "MiniMax",
@@ -286,6 +298,9 @@ _PROVIDER_ALIASES = {
"github-model": "copilot",
"github-copilot-acp": "copilot-acp",
"copilot-acp-agent": "copilot-acp",
"google": "gemini",
"google-gemini": "gemini",
"google-ai-studio": "gemini",
"kimi": "kimi-coding",
"moonshot": "kimi-coding",
"minimax-china": "minimax-cn",
@@ -550,7 +565,8 @@ def list_available_providers() -> list[dict[str, str]]:
# Canonical providers in display order
_PROVIDER_ORDER = [
    "openrouter", "nous", "openai-codex", "copilot", "copilot-acp",
-    "huggingface", "zai", "kimi-coding", "minimax", "minimax-cn", "kilocode", "anthropic", "alibaba",
+    "gemini", "huggingface",
+    "zai", "kimi-coding", "minimax", "minimax-cn", "kilocode", "anthropic", "alibaba",
    "opencode-zen", "opencode-go",
    "ai-gateway", "deepseek", "custom",

View File

@@ -111,6 +111,11 @@ _DEFAULT_PROVIDER_MODELS = {
"gemini-2.5-pro",
"grok-code-fast-1",
],
"gemini": [
"gemini-3.1-pro-preview", "gemini-3-flash-preview", "gemini-3.1-flash-lite-preview",
"gemini-2.5-pro", "gemini-2.5-flash", "gemini-2.5-flash-lite",
"gemma-4-31b-it", "gemma-4-26b-it",
],
"zai": ["glm-5", "glm-4.7", "glm-4.5", "glm-4.5-flash"],
"kimi-coding": ["kimi-k2.5", "kimi-k2-thinking", "kimi-k2-turbo-preview"],
"minimax": ["MiniMax-M2.7", "MiniMax-M2.7-highspeed", "MiniMax-M2.5", "MiniMax-M2.5-highspeed", "MiniMax-M2.1"],

View File

@@ -0,0 +1,269 @@
"""Tests for Google AI Studio (Gemini) provider integration."""
import os
import pytest
from unittest.mock import patch, MagicMock
from hermes_cli.auth import PROVIDER_REGISTRY, resolve_provider, resolve_api_key_provider_credentials
from hermes_cli.models import _PROVIDER_MODELS, _PROVIDER_LABELS, _PROVIDER_ALIASES, normalize_provider
from hermes_cli.model_normalize import normalize_model_for_provider, detect_vendor
from agent.model_metadata import get_model_context_length
from agent.models_dev import PROVIDER_TO_MODELS_DEV, list_agentic_models, _NOISE_PATTERNS
# ── Provider Registry ──
class TestGeminiProviderRegistry:
def test_gemini_in_registry(self):
assert "gemini" in PROVIDER_REGISTRY
def test_gemini_config(self):
pconfig = PROVIDER_REGISTRY["gemini"]
assert pconfig.id == "gemini"
assert pconfig.name == "Google AI Studio"
assert pconfig.auth_type == "api_key"
assert pconfig.inference_base_url == "https://generativelanguage.googleapis.com/v1beta/openai"
def test_gemini_env_vars(self):
pconfig = PROVIDER_REGISTRY["gemini"]
assert pconfig.api_key_env_vars == ("GOOGLE_API_KEY", "GEMINI_API_KEY")
assert pconfig.base_url_env_var == "GEMINI_BASE_URL"
def test_gemini_base_url(self):
assert "generativelanguage.googleapis.com" in PROVIDER_REGISTRY["gemini"].inference_base_url
# ── Provider Aliases ──
PROVIDER_ENV_VARS = (
"OPENROUTER_API_KEY", "OPENAI_API_KEY", "ANTHROPIC_API_KEY",
"GOOGLE_API_KEY", "GEMINI_API_KEY", "GEMINI_BASE_URL",
"GLM_API_KEY", "ZAI_API_KEY", "KIMI_API_KEY",
"MINIMAX_API_KEY", "DEEPSEEK_API_KEY",
)
@pytest.fixture(autouse=True)
def _clean_provider_env(monkeypatch):
for var in PROVIDER_ENV_VARS:
monkeypatch.delenv(var, raising=False)
class TestGeminiAliases:
def test_explicit_gemini(self):
assert resolve_provider("gemini") == "gemini"
def test_alias_google(self):
assert resolve_provider("google") == "gemini"
def test_alias_google_gemini(self):
assert resolve_provider("google-gemini") == "gemini"
def test_alias_google_ai_studio(self):
assert resolve_provider("google-ai-studio") == "gemini"
def test_models_py_aliases(self):
assert _PROVIDER_ALIASES.get("google") == "gemini"
assert _PROVIDER_ALIASES.get("google-gemini") == "gemini"
assert _PROVIDER_ALIASES.get("google-ai-studio") == "gemini"
def test_normalize_provider(self):
assert normalize_provider("google") == "gemini"
assert normalize_provider("gemini") == "gemini"
assert normalize_provider("google-ai-studio") == "gemini"
# ── Auto-detection ──
class TestGeminiAutoDetection:
def test_auto_detects_google_api_key(self, monkeypatch):
monkeypatch.setenv("GOOGLE_API_KEY", "test-google-key")
assert resolve_provider("auto") == "gemini"
def test_auto_detects_gemini_api_key(self, monkeypatch):
monkeypatch.setenv("GEMINI_API_KEY", "test-gemini-key")
assert resolve_provider("auto") == "gemini"
def test_google_api_key_priority_over_gemini(self, monkeypatch):
monkeypatch.setenv("GOOGLE_API_KEY", "primary-key")
monkeypatch.setenv("GEMINI_API_KEY", "alias-key")
creds = resolve_api_key_provider_credentials("gemini")
assert creds["api_key"] == "primary-key"
assert creds["source"] == "GOOGLE_API_KEY"
# ── Credential Resolution ──
class TestGeminiCredentials:
def test_resolve_with_google_api_key(self, monkeypatch):
monkeypatch.setenv("GOOGLE_API_KEY", "google-secret")
creds = resolve_api_key_provider_credentials("gemini")
assert creds["provider"] == "gemini"
assert creds["api_key"] == "google-secret"
assert creds["base_url"] == "https://generativelanguage.googleapis.com/v1beta/openai"
def test_resolve_with_gemini_api_key(self, monkeypatch):
monkeypatch.setenv("GEMINI_API_KEY", "gemini-secret")
creds = resolve_api_key_provider_credentials("gemini")
assert creds["api_key"] == "gemini-secret"
def test_resolve_with_custom_base_url(self, monkeypatch):
monkeypatch.setenv("GOOGLE_API_KEY", "key")
monkeypatch.setenv("GEMINI_BASE_URL", "https://custom.endpoint/v1")
creds = resolve_api_key_provider_credentials("gemini")
assert creds["base_url"] == "https://custom.endpoint/v1"
def test_runtime_gemini(self, monkeypatch):
monkeypatch.setenv("GOOGLE_API_KEY", "google-key")
from hermes_cli.runtime_provider import resolve_runtime_provider
result = resolve_runtime_provider(requested="gemini")
assert result["provider"] == "gemini"
assert result["api_mode"] == "chat_completions"
assert result["api_key"] == "google-key"
assert result["base_url"] == "https://generativelanguage.googleapis.com/v1beta/openai"
# ── Model Catalog ──
class TestGeminiModelCatalog:
def test_provider_models_exist(self):
assert "gemini" in _PROVIDER_MODELS
models = _PROVIDER_MODELS["gemini"]
assert "gemini-2.5-pro" in models
assert "gemini-2.5-flash" in models
assert "gemma-4-31b-it" in models
def test_provider_models_has_3x(self):
models = _PROVIDER_MODELS["gemini"]
assert "gemini-3.1-pro-preview" in models
assert "gemini-3-flash-preview" in models
assert "gemini-3.1-flash-lite-preview" in models
def test_provider_label(self):
assert "gemini" in _PROVIDER_LABELS
assert _PROVIDER_LABELS["gemini"] == "Google AI Studio"
# ── Model Normalization ──
class TestGeminiModelNormalization:
def test_passthrough_bare_name(self):
assert normalize_model_for_provider("gemini-2.5-flash", "gemini") == "gemini-2.5-flash"
def test_strip_vendor_prefix(self):
assert normalize_model_for_provider("google/gemini-2.5-flash", "gemini") == "google/gemini-2.5-flash"
def test_gemma_vendor_detection(self):
assert detect_vendor("gemma-4-31b-it") == "google"
def test_gemini_vendor_detection(self):
assert detect_vendor("gemini-2.5-flash") == "google"
def test_aggregator_prepends_vendor(self):
result = normalize_model_for_provider("gemini-2.5-flash", "openrouter")
assert result == "google/gemini-2.5-flash"
def test_gemma_aggregator_prepends_vendor(self):
result = normalize_model_for_provider("gemma-4-31b-it", "openrouter")
assert result == "google/gemma-4-31b-it"
# ── Context Length ──
class TestGeminiContextLength:
def test_gemma_4_31b_context(self):
ctx = get_model_context_length("gemma-4-31b-it", provider="gemini")
assert ctx == 256000
def test_gemma_4_26b_context(self):
ctx = get_model_context_length("gemma-4-26b-it", provider="gemini")
assert ctx == 256000
def test_gemini_3_context(self):
ctx = get_model_context_length("gemini-3.1-pro-preview", provider="gemini")
assert ctx == 1048576
# ── Agent Init (no SyntaxError) ──
class TestGeminiAgentInit:
def test_agent_imports_without_error(self):
"""Verify run_agent.py has no SyntaxError (the critical bug)."""
import importlib
import run_agent
importlib.reload(run_agent)
def test_gemini_agent_uses_chat_completions(self, monkeypatch):
"""Gemini falls through to chat_completions — no special elif needed."""
monkeypatch.setenv("GOOGLE_API_KEY", "test-key")
with patch("run_agent.OpenAI") as mock_openai:
mock_openai.return_value = MagicMock()
from run_agent import AIAgent
agent = AIAgent(
model="gemini-2.5-flash",
provider="gemini",
api_key="test-key",
base_url="https://generativelanguage.googleapis.com/v1beta/openai",
)
assert agent.api_mode == "chat_completions"
assert agent.provider == "gemini"
# ── models.dev Integration ──
class TestGeminiModelsDev:
def test_gemini_mapped_to_google(self):
assert PROVIDER_TO_MODELS_DEV.get("gemini") == "google"
def test_noise_filter_excludes_tts(self):
assert _NOISE_PATTERNS.search("gemini-2.5-pro-preview-tts")
def test_noise_filter_excludes_dated_preview(self):
assert _NOISE_PATTERNS.search("gemini-2.5-flash-preview-04-17")
def test_noise_filter_excludes_embedding(self):
assert _NOISE_PATTERNS.search("gemini-embedding-001")
def test_noise_filter_excludes_live(self):
assert _NOISE_PATTERNS.search("gemini-live-2.5-flash")
def test_noise_filter_excludes_image(self):
assert _NOISE_PATTERNS.search("gemini-2.5-flash-image")
def test_noise_filter_excludes_customtools(self):
assert _NOISE_PATTERNS.search("gemini-3.1-pro-preview-customtools")
def test_noise_filter_passes_stable(self):
assert not _NOISE_PATTERNS.search("gemini-2.5-flash")
def test_noise_filter_passes_preview(self):
# Non-dated preview (e.g. gemini-3-flash-preview) should pass
assert not _NOISE_PATTERNS.search("gemini-3-flash-preview")
def test_noise_filter_passes_gemma(self):
assert not _NOISE_PATTERNS.search("gemma-4-31b-it")
def test_list_agentic_models_with_mock_data(self):
"""list_agentic_models filters correctly from mock models.dev data."""
mock_data = {
"google": {
"models": {
"gemini-3-flash-preview": {"tool_call": True},
"gemini-2.5-pro": {"tool_call": True},
"gemini-embedding-001": {"tool_call": False},
"gemini-2.5-flash-preview-tts": {"tool_call": False},
"gemini-live-2.5-flash": {"tool_call": True},
"gemini-2.5-flash-preview-04-17": {"tool_call": True},
"gemma-4-31b-it": {"tool_call": True},
}
}
}
with patch("agent.models_dev.fetch_models_dev", return_value=mock_data):
result = list_agentic_models("gemini")
assert "gemini-3-flash-preview" in result
assert "gemini-2.5-pro" in result
assert "gemma-4-31b-it" in result
# Filtered out:
assert "gemini-embedding-001" not in result # no tool_call
assert "gemini-2.5-flash-preview-tts" not in result # no tool_call
assert "gemini-live-2.5-flash" not in result # noise: live-
assert "gemini-2.5-flash-preview-04-17" not in result # noise: dated preview