Files
hermes-agent/environments/benchmarks/terminalbench_2/default.yaml
2026-04-08 18:34:01 -07:00

48 lines
1.8 KiB
YAML

# Terminal-Bench 2.0 Evaluation -- Default Configuration
#
# Eval-only environment for the TB2 benchmark (89 terminal tasks).
# Uses Modal terminal backend for per-task cloud-isolated sandboxes
# and OpenRouter for inference.
#
# Usage:
#   python environments/benchmarks/terminalbench_2/terminalbench2_env.py evaluate \
#     --config environments/benchmarks/terminalbench_2/default.yaml
#
# Override model:
#   python environments/benchmarks/terminalbench_2/terminalbench2_env.py evaluate \
#     --config environments/benchmarks/terminalbench_2/default.yaml \
#     --openai.model_name anthropic/claude-sonnet-4
---
env:
  enabled_toolsets: ["terminal", "file"]
  max_agent_turns: 100
  max_token_length: 32000
  agent_temperature: 1.0
  terminal_backend: "modal"
  terminal_timeout: 300  # 5 min per command (builds, pip install)
  tool_pool_size: 128  # thread pool for 89 parallel tasks
  dataset_name: "NousResearch/terminal-bench-2-verified-flattened"
  test_timeout: 600
  task_timeout: 900  # 15 min wall-clock per task, auto-FAIL if exceeded
  tokenizer_name: "NousResearch/Hermes-3-Llama-3.1-8B"
  use_wandb: true
  wandb_name: "terminal-bench-2"
  ensure_scores_are_not_same: false
  data_dir_to_save_evals: "environments/benchmarks/evals/terminal-bench-2"
  # CRITICAL: Limit concurrent Modal sandbox creations to avoid deadlocks.
  # Modal's blocking calls (App.lookup, etc.) deadlock when too many sandboxes
  # are created simultaneously inside thread pool workers via asyncio.run().
  max_concurrent_tasks: 8
  # OpenRouter provider-routing options, forwarded in the request body.
  # NOTE(review): nesting of extra_body under env reconstructed from its
  # position in the source — confirm against the env's config schema.
  extra_body:
    provider:
      order: ["DeepInfra"]
      allow_fallbacks: false
openai:
  base_url: "https://openrouter.ai/api/v1"
  model_name: "nvidia/nemotron-3-super-120b-a12b"
  server_type: "openai"
  health_check: false
  timeout: 300  # 5 min per API call (default 1200s causes 20min stalls)
  # api_key loaded from OPENROUTER_API_KEY in .env