Mirror of https://github.com/NousResearch/hermes-agent.git, synced 2026-04-28 23:11:37 +08:00
Compare commits: skill/gith...fix/dayton

5 Commits:

- c724aa853d
- 4eb31a2da4
- c97806990f
- 61999a6622
- b0a66c6ad6
```diff
@@ -2773,6 +2773,10 @@ def set_config_value(key: str, value: str):
         "terminal.timeout": "TERMINAL_TIMEOUT",
         "terminal.sandbox_dir": "TERMINAL_SANDBOX_DIR",
         "terminal.persistent_shell": "TERMINAL_PERSISTENT_SHELL",
+        "terminal.container_cpu": "TERMINAL_CONTAINER_CPU",
+        "terminal.container_memory": "TERMINAL_CONTAINER_MEMORY",
+        "terminal.container_disk": "TERMINAL_CONTAINER_DISK",
+        "terminal.container_persistent": "TERMINAL_CONTAINER_PERSISTENT",
     }
     if key in _config_to_env_sync:
         save_env_value(_config_to_env_sync[key], str(value))
```
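
For context, a minimal sketch of how this config-to-env mirroring typically works. Only the dict entries and the `set_config_value`/`save_env_value` names come from the diff; the `.env` location and the upsert logic below are assumptions for illustration:

```python
# Minimal sketch of config -> env mirroring; the env-file path and
# rewrite logic are illustrative, not from the hermes-agent source.
from pathlib import Path

ENV_FILE = Path(".env")  # hypothetical location

def save_env_value(env_key: str, value: str) -> None:
    """Upsert a KEY=value line in a dotenv-style file."""
    lines = ENV_FILE.read_text().splitlines() if ENV_FILE.exists() else []
    lines = [ln for ln in lines if not ln.startswith(f"{env_key}=")]
    lines.append(f"{env_key}={value}")
    ENV_FILE.write_text("\n".join(lines) + "\n")

_config_to_env_sync = {
    "terminal.container_cpu": "TERMINAL_CONTAINER_CPU",
    "terminal.container_memory": "TERMINAL_CONTAINER_MEMORY",
}

def set_config_value(key: str, value: str) -> None:
    # ... persist to the config store, then mirror to the env file
    if key in _config_to_env_sync:
        save_env_value(_config_to_env_sync[key], str(value))
```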
```diff
@@ -255,3 +255,57 @@ class TestEdgeCases:
 
         mgr.sync(force=True)
         upload.assert_not_called()  # _file_mtime_key returns None, skipped
+
+
+class TestBulkUpload:
+    """Tests for the optional bulk_upload_fn callback."""
+
+    def test_bulk_upload_used_when_provided(self, tmp_files):
+        """When bulk_upload_fn is set, it's called instead of per-file upload_fn."""
+        upload = MagicMock()
+        bulk_upload = MagicMock()
+        mgr = FileSyncManager(
+            get_files_fn=_make_get_files(tmp_files),
+            upload_fn=upload,
+            delete_fn=MagicMock(),
+            bulk_upload_fn=bulk_upload,
+        )
+
+        mgr.sync(force=True)
+        upload.assert_not_called()
+        bulk_upload.assert_called_once()
+        # All 3 files passed as a list of (host, remote) tuples
+        files_arg = bulk_upload.call_args[0][0]
+        assert len(files_arg) == 3
+
+    def test_fallback_to_upload_fn_when_no_bulk(self, tmp_files):
+        """Without bulk_upload_fn, per-file upload_fn is used (backwards compat)."""
+        upload = MagicMock()
+        mgr = FileSyncManager(
+            get_files_fn=_make_get_files(tmp_files),
+            upload_fn=upload,
+            delete_fn=MagicMock(),
+            bulk_upload_fn=None,
+        )
+
+        mgr.sync(force=True)
+        assert upload.call_count == 3
+
+    def test_bulk_upload_rollback_on_failure(self, tmp_files):
+        """Bulk upload failure rolls back synced state so next sync retries."""
+        bulk_upload = MagicMock(side_effect=RuntimeError("upload failed"))
+        mgr = FileSyncManager(
+            get_files_fn=_make_get_files(tmp_files),
+            upload_fn=MagicMock(),
+            delete_fn=MagicMock(),
+            bulk_upload_fn=bulk_upload,
+        )
+
+        mgr.sync(force=True)  # fails, should rollback
+
+        # State rolled back: next sync should retry all files
+        bulk_upload.side_effect = None
+        bulk_upload.reset_mock()
+        mgr.sync(force=True)
+        bulk_upload.assert_called_once()
+        assert len(bulk_upload.call_args[0][0]) == 3
```
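
These tests lean on a `tmp_files` fixture and a `_make_get_files` helper that are not shown in this hunk. A plausible pytest sketch of both; the names appear in the diff, but these bodies are assumptions:

```python
# Plausible test helpers; only the names tmp_files and
# _make_get_files come from the diff, the bodies are assumed.
import pytest

@pytest.fixture
def tmp_files(tmp_path):
    """Create 3 host files and return [(host_path, remote_path), ...]."""
    pairs = []
    for i in range(3):
        f = tmp_path / f"file{i}.txt"
        f.write_text(f"content {i}")
        pairs.append((str(f), f"/remote/.hermes/file{i}.txt"))
    return pairs

def _make_get_files(pairs):
    """Wrap a static file list in the GetFilesFn callback shape."""
    return lambda: pairs
```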
```diff
@@ -9,7 +9,6 @@ import logging
 import math
 import shlex
 import threading
-import warnings
 from pathlib import Path
 
 from tools.environments.base import (
```
```diff
@@ -63,10 +62,9 @@ class DaytonaEnvironment(BaseEnvironment):
         memory_gib = max(1, math.ceil(memory / 1024))
         disk_gib = max(1, math.ceil(disk / 1024))
         if disk_gib > 10:
-            warnings.warn(
-                f"Daytona: requested disk ({disk_gib}GB) exceeds platform limit (10GB). "
-                f"Capping to 10GB.",
-                stacklevel=2,
+            logger.warning(
+                "Daytona: requested disk (%dGB) exceeds platform limit (10GB). "
+                "Capping to 10GB.", disk_gib,
             )
             disk_gib = 10
         resources = Resources(cpu=cpu, memory=memory_gib, disk=disk_gib)
```
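
Besides moving the message from a one-shot warning to the module logger, this switch changes eager f-string interpolation into lazy %-style arguments, which the stdlib `logging` module only interpolates if a handler actually emits the record. A quick standalone illustration:

```python
# Eager vs. lazy formatting with the stdlib logging module.
import logging

logger = logging.getLogger("daytona")
disk_gib = 42

# Eager: the f-string is built even if WARNING is filtered out.
# logger.warning(f"requested disk ({disk_gib}GB) exceeds limit")

# Lazy: %d is interpolated only when the record is actually emitted.
logger.warning("requested disk (%dGB) exceeds limit", disk_gib)
```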
```diff
@@ -129,6 +127,7 @@ class DaytonaEnvironment(BaseEnvironment):
             get_files_fn=lambda: iter_sync_files(f"{self._remote_home}/.hermes"),
             upload_fn=self._daytona_upload,
             delete_fn=self._daytona_delete,
+            bulk_upload_fn=self._daytona_bulk_upload,
         )
         self._sync_manager.sync(force=True)
         self.init_session()
```
```diff
@@ -139,6 +138,30 @@ class DaytonaEnvironment(BaseEnvironment):
         self._sandbox.process.exec(f"mkdir -p {parent}")
         self._sandbox.fs.upload_file(host_path, remote_path)
 
+    def _daytona_bulk_upload(self, files: list[tuple[str, str]]) -> None:
+        """Upload many files in a single HTTP call via Daytona SDK.
+
+        Uses ``sandbox.fs.upload_files()`` which batches all files into one
+        multipart POST, avoiding per-file TLS/HTTP overhead (~580 files
+        goes from ~5 min to <2 s).
+        """
+        from daytona.common.filesystem import FileUpload
+
+        if not files:
+            return
+
+        # Pre-create all unique parent directories in one shell call
+        parents = sorted({str(Path(remote).parent) for _, remote in files})
+        if parents:
+            mkdir_cmd = "mkdir -p " + " ".join(shlex.quote(p) for p in parents)
+            self._sandbox.process.exec(mkdir_cmd)
+
+        uploads = [
+            FileUpload(source=host_path, destination=remote_path)
+            for host_path, remote_path in files
+        ]
+        self._sandbox.fs.upload_files(uploads)
+
     def _daytona_delete(self, remote_paths: list[str]) -> None:
         """Batch-delete remote files via SDK exec."""
         self._sandbox.process.exec(quoted_rm_command(remote_paths))
```
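
The single `mkdir -p` with `shlex.quote` is what keeps directory pre-creation to one remote round trip instead of one per file. A standalone illustration of that batching (the paths below are made up):

```python
# Standalone illustration of the batched mkdir built above.
# shlex.quote guards against spaces and shell metacharacters;
# it only adds quotes where the path needs them.
import shlex
from pathlib import Path

files = [
    ("/host/a.txt", "/home/user/.hermes/skills/a.txt"),
    ("/host/b.txt", "/home/user/.hermes/my dir/b.txt"),
]
parents = sorted({str(Path(remote).parent) for _, remote in files})
mkdir_cmd = "mkdir -p " + " ".join(shlex.quote(p) for p in parents)
print(mkdir_cmd)
# mkdir -p '/home/user/.hermes/my dir' /home/user/.hermes/skills
```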
```diff
@@ -21,6 +21,7 @@ _FORCE_SYNC_ENV = "HERMES_FORCE_FILE_SYNC"
 
 # Transport callbacks provided by each backend
 UploadFn = Callable[[str, str], None]  # (host_path, remote_path) -> raises on failure
+BulkUploadFn = Callable[[list[tuple[str, str]]], None]  # [(host_path, remote_path), ...] -> raises on failure
 DeleteFn = Callable[[list[str]], None]  # (remote_paths) -> raises on failure
 GetFilesFn = Callable[[], list[tuple[str, str]]]  # () -> [(host_path, remote_path), ...]
 
```
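
Any callable matching this shape can serve as a `BulkUploadFn`. For instance, a local-copy implementation handy for tests; this is illustrative, not from the source:

```python
# Illustrative BulkUploadFn: copies each (host_path, remote_path)
# pair on the local filesystem, raising on failure like the alias
# contract requires.
import shutil
from pathlib import Path

def local_bulk_upload(files: list[tuple[str, str]]) -> None:
    for host_path, remote_path in files:
        Path(remote_path).parent.mkdir(parents=True, exist_ok=True)
        shutil.copyfile(host_path, remote_path)
```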
```diff
@@ -76,9 +77,11 @@ class FileSyncManager:
         upload_fn: UploadFn,
         delete_fn: DeleteFn,
         sync_interval: float = _SYNC_INTERVAL_SECONDS,
+        bulk_upload_fn: BulkUploadFn | None = None,
     ):
         self._get_files_fn = get_files_fn
         self._upload_fn = upload_fn
+        self._bulk_upload_fn = bulk_upload_fn
         self._delete_fn = delete_fn
         self._synced_files: dict[str, tuple[float, int]] = {}  # remote_path -> (mtime, size)
         self._last_sync_time: float = 0.0  # monotonic; 0 ensures first sync runs
```
```diff
@@ -129,6 +132,10 @@ class FileSyncManager:
             logger.debug("file_sync: deleting %d stale remote file(s)", len(to_delete))
 
         try:
-            for host_path, remote_path in to_upload:
-                self._upload_fn(host_path, remote_path)
-                logger.debug("file_sync: uploaded %s -> %s", host_path, remote_path)
+            if to_upload and self._bulk_upload_fn is not None:
+                self._bulk_upload_fn(to_upload)
+                logger.debug("file_sync: bulk-uploaded %d file(s)", len(to_upload))
+            else:
+                for host_path, remote_path in to_upload:
+                    self._upload_fn(host_path, remote_path)
+                    logger.debug("file_sync: uploaded %s -> %s", host_path, remote_path)
```
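
The rollback behavior exercised by `test_bulk_upload_rollback_on_failure` is not visible in this hunk. One plausible shape for the surrounding snapshot-and-restore logic, inferred from the test rather than taken from the `FileSyncManager` source:

```python
# Plausible snapshot/rollback pattern inferred from the rollback
# test; an assumption, not the actual FileSyncManager code.
import logging

logger = logging.getLogger("file_sync")

class MiniSync:
    def __init__(self, bulk_upload_fn):
        self._bulk_upload_fn = bulk_upload_fn
        self._synced_files: dict[str, tuple[float, int]] = {}

    def sync(self, to_upload: list[tuple[str, str]]) -> None:
        snapshot = dict(self._synced_files)
        # Optimistically mark files synced, then attempt the upload.
        for _host, remote in to_upload:
            self._synced_files[remote] = (0.0, 0)  # placeholder (mtime, size)
        try:
            self._bulk_upload_fn(to_upload)
        except Exception:
            # Restore pre-sync state so the next sync retries all files.
            self._synced_files = snapshot
            logger.exception("file_sync: bulk upload failed; rolled back")
```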