diff --git a/nanobot/agent/loop.py b/nanobot/agent/loop.py
index fcbc880..8605a09 100644
--- a/nanobot/agent/loop.py
+++ b/nanobot/agent/loop.py
@@ -5,7 +5,6 @@ from __future__ import annotations
 import asyncio
 import json
 import re
-import weakref
 from contextlib import AsyncExitStack
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, Awaitable, Callable
@@ -13,7 +12,7 @@ from typing import TYPE_CHECKING, Any, Awaitable, Callable
 from loguru import logger
 
 from nanobot.agent.context import ContextBuilder
-from nanobot.agent.memory import MemoryStore
+from nanobot.agent.memory import MemoryConsolidator
 from nanobot.agent.subagent import SubagentManager
 from nanobot.agent.tools.cron import CronTool
 from nanobot.agent.tools.filesystem import EditFileTool, ListDirTool, ReadFileTool, WriteFileTool
@@ -55,8 +54,8 @@ class AgentLoop:
         max_iterations: int = 40,
         temperature: float = 0.1,
         max_tokens: int = 4096,
-        memory_window: int = 100,
         reasoning_effort: str | None = None,
+        context_window_tokens: int = 65_536,
         brave_api_key: str | None = None,
         web_proxy: str | None = None,
         exec_config: ExecToolConfig | None = None,
@@ -75,8 +74,8 @@ class AgentLoop:
         self.max_iterations = max_iterations
         self.temperature = temperature
         self.max_tokens = max_tokens
-        self.memory_window = memory_window
         self.reasoning_effort = reasoning_effort
+        self.context_window_tokens = context_window_tokens
         self.brave_api_key = brave_api_key
         self.web_proxy = web_proxy
         self.exec_config = exec_config or ExecToolConfig()
@@ -105,11 +104,17 @@ class AgentLoop:
         self._mcp_stack: AsyncExitStack | None = None
         self._mcp_connected = False
         self._mcp_connecting = False
-        self._consolidating: set[str] = set()  # Session keys with consolidation in progress
-        self._consolidation_tasks: set[asyncio.Task] = set()  # Strong refs to in-flight tasks
-        self._consolidation_locks: weakref.WeakValueDictionary[str, asyncio.Lock] = weakref.WeakValueDictionary()
         self._active_tasks: dict[str, list[asyncio.Task]] = {}  # session_key -> tasks
         self._processing_lock = asyncio.Lock()
+        self.memory_consolidator = MemoryConsolidator(
+            workspace=workspace,
+            provider=provider,
+            model=self.model,
+            sessions=self.sessions,
+            context_window_tokens=context_window_tokens,
+            build_messages=self.context.build_messages,
+            get_tool_definitions=self.tools.get_definitions,
+        )
         self._register_default_tools()
 
     def _register_default_tools(self) -> None:
@@ -182,7 +187,7 @@ class AgentLoop:
         initial_messages: list[dict],
         on_progress: Callable[..., Awaitable[None]] | None = None,
     ) -> tuple[str | None, list[str], list[dict]]:
-        """Run the agent iteration loop.
Returns (final_content, tools_used, messages).""" + """Run the agent iteration loop.""" messages = initial_messages iteration = 0 final_content = None @@ -191,9 +196,11 @@ class AgentLoop: while iteration < self.max_iterations: iteration += 1 + tool_defs = self.tools.get_definitions() + response = await self.provider.chat_with_retry( messages=messages, - tools=self.tools.get_definitions(), + tools=tool_defs, model=self.model, temperature=self.temperature, max_tokens=self.max_tokens, @@ -341,8 +348,9 @@ class AgentLoop: logger.info("Processing system message from {}", msg.sender_id) key = f"{channel}:{chat_id}" session = self.sessions.get_or_create(key) + await self.memory_consolidator.maybe_consolidate_by_tokens(session) self._set_tool_context(channel, chat_id, msg.metadata.get("message_id")) - history = session.get_history(max_messages=self.memory_window) + history = session.get_history(max_messages=0) messages = self.context.build_messages( history=history, current_message=msg.content, channel=channel, chat_id=chat_id, @@ -350,6 +358,7 @@ class AgentLoop: final_content, _, all_msgs = await self._run_agent_loop(messages) self._save_turn(session, all_msgs, 1 + len(history)) self.sessions.save(session) + await self.memory_consolidator.maybe_consolidate_by_tokens(session) return OutboundMessage(channel=channel, chat_id=chat_id, content=final_content or "Background task completed.") @@ -362,27 +371,20 @@ class AgentLoop: # Slash commands cmd = msg.content.strip().lower() if cmd == "/new": - lock = self._consolidation_locks.setdefault(session.key, asyncio.Lock()) - self._consolidating.add(session.key) try: - async with lock: - snapshot = session.messages[session.last_consolidated:] - if snapshot: - temp = Session(key=session.key) - temp.messages = list(snapshot) - if not await self._consolidate_memory(temp, archive_all=True): - return OutboundMessage( - channel=msg.channel, chat_id=msg.chat_id, - content="Memory archival failed, session not cleared. Please try again.", - ) + if not await self.memory_consolidator.archive_unconsolidated(session): + return OutboundMessage( + channel=msg.channel, + chat_id=msg.chat_id, + content="Memory archival failed, session not cleared. Please try again.", + ) except Exception: logger.exception("/new archival failed for {}", session.key) return OutboundMessage( - channel=msg.channel, chat_id=msg.chat_id, + channel=msg.channel, + chat_id=msg.chat_id, content="Memory archival failed, session not cleared. 
Please try again.", ) - finally: - self._consolidating.discard(session.key) session.clear() self.sessions.save(session) @@ -393,30 +395,14 @@ class AgentLoop: return OutboundMessage(channel=msg.channel, chat_id=msg.chat_id, content="🐈 nanobot commands:\n/new — Start a new conversation\n/stop — Stop the current task\n/help — Show available commands") - unconsolidated = len(session.messages) - session.last_consolidated - if (unconsolidated >= self.memory_window and session.key not in self._consolidating): - self._consolidating.add(session.key) - lock = self._consolidation_locks.setdefault(session.key, asyncio.Lock()) - - async def _consolidate_and_unlock(): - try: - async with lock: - await self._consolidate_memory(session) - finally: - self._consolidating.discard(session.key) - _task = asyncio.current_task() - if _task is not None: - self._consolidation_tasks.discard(_task) - - _task = asyncio.create_task(_consolidate_and_unlock()) - self._consolidation_tasks.add(_task) + await self.memory_consolidator.maybe_consolidate_by_tokens(session) self._set_tool_context(msg.channel, msg.chat_id, msg.metadata.get("message_id")) if message_tool := self.tools.get("message"): if isinstance(message_tool, MessageTool): message_tool.start_turn() - history = session.get_history(max_messages=self.memory_window) + history = session.get_history(max_messages=0) initial_messages = self.context.build_messages( history=history, current_message=msg.content, @@ -441,6 +427,7 @@ class AgentLoop: self._save_turn(session, all_msgs, 1 + len(history)) self.sessions.save(session) + await self.memory_consolidator.maybe_consolidate_by_tokens(session) if (mt := self.tools.get("message")) and isinstance(mt, MessageTool) and mt._sent_in_turn: return None @@ -487,13 +474,6 @@ class AgentLoop: session.messages.append(entry) session.updated_at = datetime.now() - async def _consolidate_memory(self, session, archive_all: bool = False) -> bool: - """Delegate to MemoryStore.consolidate(). Returns True on success.""" - return await MemoryStore(self.workspace).consolidate( - session, self.provider, self.model, - archive_all=archive_all, memory_window=self.memory_window, - ) - async def process_direct( self, content: str, diff --git a/nanobot/agent/memory.py b/nanobot/agent/memory.py index 66efec2..cd5f54f 100644 --- a/nanobot/agent/memory.py +++ b/nanobot/agent/memory.py @@ -2,17 +2,19 @@ from __future__ import annotations +import asyncio import json +import weakref from pathlib import Path -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Callable from loguru import logger -from nanobot.utils.helpers import ensure_dir +from nanobot.utils.helpers import ensure_dir, estimate_message_tokens, estimate_prompt_tokens_chain if TYPE_CHECKING: from nanobot.providers.base import LLMProvider - from nanobot.session.manager import Session + from nanobot.session.manager import Session, SessionManager _SAVE_MEMORY_TOOL = [ @@ -26,7 +28,7 @@ _SAVE_MEMORY_TOOL = [ "properties": { "history_entry": { "type": "string", - "description": "A paragraph (2-5 sentences) summarizing key events/decisions/topics. " + "description": "A paragraph summarizing key events/decisions/topics. " "Start with [YYYY-MM-DD HH:MM]. 
Include detail useful for grep search.", }, "memory_update": { @@ -42,6 +44,20 @@ _SAVE_MEMORY_TOOL = [ ] +def _ensure_text(value: Any) -> str: + """Normalize tool-call payload values to text for file storage.""" + return value if isinstance(value, str) else json.dumps(value, ensure_ascii=False) + + +def _normalize_save_memory_args(args: Any) -> dict[str, Any] | None: + """Normalize provider tool-call arguments to the expected dict shape.""" + if isinstance(args, str): + args = json.loads(args) + if isinstance(args, list): + return args[0] if args and isinstance(args[0], dict) else None + return args if isinstance(args, dict) else None + + class MemoryStore: """Two-layer memory: MEMORY.md (long-term facts) + HISTORY.md (grep-searchable log).""" @@ -66,40 +82,27 @@ class MemoryStore: long_term = self.read_long_term() return f"## Long-term Memory\n{long_term}" if long_term else "" + @staticmethod + def _format_messages(messages: list[dict]) -> str: + lines = [] + for message in messages: + if not message.get("content"): + continue + tools = f" [tools: {', '.join(message['tools_used'])}]" if message.get("tools_used") else "" + lines.append( + f"[{message.get('timestamp', '?')[:16]}] {message['role'].upper()}{tools}: {message['content']}" + ) + return "\n".join(lines) + async def consolidate( self, - session: Session, + messages: list[dict], provider: LLMProvider, model: str, - *, - archive_all: bool = False, - memory_window: int = 50, ) -> bool: - """Consolidate old messages into MEMORY.md + HISTORY.md via LLM tool call. - - Returns True on success (including no-op), False on failure. - """ - if archive_all: - old_messages = session.messages - keep_count = 0 - logger.info("Memory consolidation (archive_all): {} messages", len(session.messages)) - else: - keep_count = memory_window // 2 - if len(session.messages) <= keep_count: - return True - if len(session.messages) - session.last_consolidated <= 0: - return True - old_messages = session.messages[session.last_consolidated:-keep_count] - if not old_messages: - return True - logger.info("Memory consolidation: {} to consolidate, {} keep", len(old_messages), keep_count) - - lines = [] - for m in old_messages: - if not m.get("content"): - continue - tools = f" [tools: {', '.join(m['tools_used'])}]" if m.get("tools_used") else "" - lines.append(f"[{m.get('timestamp', '?')[:16]}] {m['role'].upper()}{tools}: {m['content']}") + """Consolidate the provided message chunk into MEMORY.md + HISTORY.md.""" + if not messages: + return True current_memory = self.read_long_term() prompt = f"""Process this conversation and call the save_memory tool with your consolidation. 
@@ -108,7 +111,7 @@ class MemoryStore: {current_memory or "(empty)"} ## Conversation to Process -{chr(10).join(lines)}""" +{self._format_messages(messages)}""" try: response = await provider.chat_with_retry( @@ -124,34 +127,158 @@ class MemoryStore: logger.warning("Memory consolidation: LLM did not call save_memory, skipping") return False - args = response.tool_calls[0].arguments - # Some providers return arguments as a JSON string instead of dict - if isinstance(args, str): - args = json.loads(args) - # Some providers return arguments as a list (handle edge case) - if isinstance(args, list): - if args and isinstance(args[0], dict): - args = args[0] - else: - logger.warning("Memory consolidation: unexpected arguments as empty or non-dict list") - return False - if not isinstance(args, dict): - logger.warning("Memory consolidation: unexpected arguments type {}", type(args).__name__) + args = _normalize_save_memory_args(response.tool_calls[0].arguments) + if args is None: + logger.warning("Memory consolidation: unexpected save_memory arguments") return False if entry := args.get("history_entry"): - if not isinstance(entry, str): - entry = json.dumps(entry, ensure_ascii=False) - self.append_history(entry) + self.append_history(_ensure_text(entry)) if update := args.get("memory_update"): - if not isinstance(update, str): - update = json.dumps(update, ensure_ascii=False) + update = _ensure_text(update) if update != current_memory: self.write_long_term(update) - session.last_consolidated = 0 if archive_all else len(session.messages) - keep_count - logger.info("Memory consolidation done: {} messages, last_consolidated={}", len(session.messages), session.last_consolidated) + logger.info("Memory consolidation done for {} messages", len(messages)) return True except Exception: logger.exception("Memory consolidation failed") return False + + +class MemoryConsolidator: + """Owns consolidation policy, locking, and session offset updates.""" + + _MAX_CONSOLIDATION_ROUNDS = 5 + + def __init__( + self, + workspace: Path, + provider: LLMProvider, + model: str, + sessions: SessionManager, + context_window_tokens: int, + build_messages: Callable[..., list[dict[str, Any]]], + get_tool_definitions: Callable[[], list[dict[str, Any]]], + ): + self.store = MemoryStore(workspace) + self.provider = provider + self.model = model + self.sessions = sessions + self.context_window_tokens = context_window_tokens + self._build_messages = build_messages + self._get_tool_definitions = get_tool_definitions + self._locks: weakref.WeakValueDictionary[str, asyncio.Lock] = weakref.WeakValueDictionary() + + def get_lock(self, session_key: str) -> asyncio.Lock: + """Return the shared consolidation lock for one session.""" + return self._locks.setdefault(session_key, asyncio.Lock()) + + async def consolidate_messages(self, messages: list[dict[str, object]]) -> bool: + """Archive a selected message chunk into persistent memory.""" + return await self.store.consolidate(messages, self.provider, self.model) + + def pick_consolidation_boundary( + self, + session: Session, + tokens_to_remove: int, + ) -> tuple[int, int] | None: + """Pick a user-turn boundary that removes enough old prompt tokens.""" + start = session.last_consolidated + if start >= len(session.messages) or tokens_to_remove <= 0: + return None + + removed_tokens = 0 + last_boundary: tuple[int, int] | None = None + for idx in range(start, len(session.messages)): + message = session.messages[idx] + if idx > start and message.get("role") == "user": + last_boundary = (idx, 
removed_tokens) + if removed_tokens >= tokens_to_remove: + return last_boundary + removed_tokens += estimate_message_tokens(message) + + return last_boundary + + def estimate_session_prompt_tokens(self, session: Session) -> tuple[int, str]: + """Estimate current prompt size for the normal session history view.""" + history = session.get_history(max_messages=0) + channel, chat_id = (session.key.split(":", 1) if ":" in session.key else (None, None)) + probe_messages = self._build_messages( + history=history, + current_message="[token-probe]", + channel=channel, + chat_id=chat_id, + ) + return estimate_prompt_tokens_chain( + self.provider, + self.model, + probe_messages, + self._get_tool_definitions(), + ) + + async def archive_unconsolidated(self, session: Session) -> bool: + """Archive the full unconsolidated tail for /new-style session rollover.""" + lock = self.get_lock(session.key) + async with lock: + snapshot = session.messages[session.last_consolidated:] + if not snapshot: + return True + return await self.consolidate_messages(snapshot) + + async def maybe_consolidate_by_tokens(self, session: Session) -> None: + """Loop: archive old messages until prompt fits within half the context window.""" + if not session.messages or self.context_window_tokens <= 0: + return + + lock = self.get_lock(session.key) + async with lock: + target = self.context_window_tokens // 2 + estimated, source = self.estimate_session_prompt_tokens(session) + if estimated <= 0: + return + if estimated < self.context_window_tokens: + logger.debug( + "Token consolidation idle {}: {}/{} via {}", + session.key, + estimated, + self.context_window_tokens, + source, + ) + return + + for round_num in range(self._MAX_CONSOLIDATION_ROUNDS): + if estimated <= target: + return + + boundary = self.pick_consolidation_boundary(session, max(1, estimated - target)) + if boundary is None: + logger.debug( + "Token consolidation: no safe boundary for {} (round {})", + session.key, + round_num, + ) + return + + end_idx = boundary[0] + chunk = session.messages[session.last_consolidated:end_idx] + if not chunk: + return + + logger.info( + "Token consolidation round {} for {}: {}/{} via {}, chunk={} msgs", + round_num, + session.key, + estimated, + self.context_window_tokens, + source, + len(chunk), + ) + if not await self.consolidate_messages(chunk): + return + session.last_consolidated = end_idx + self.sessions.save(session) + + estimated, source = self.estimate_session_prompt_tokens(session) + if estimated <= 0: + return diff --git a/nanobot/cli/commands.py b/nanobot/cli/commands.py index 37f08b2..cf69450 100644 --- a/nanobot/cli/commands.py +++ b/nanobot/cli/commands.py @@ -191,6 +191,8 @@ def onboard(): save_config(Config()) console.print(f"[green]✓[/green] Created config at {config_path}") + console.print("[dim]Config template now uses `maxTokens` + `contextWindowTokens`; `memoryWindow` is no longer a runtime setting.[/dim]") + # Create workspace workspace = get_workspace_path() @@ -283,6 +285,16 @@ def _load_runtime_config(config: str | None = None, workspace: str | None = None return loaded +def _print_deprecated_memory_window_notice(config: Config) -> None: + """Warn when running with old memoryWindow-only config.""" + if config.agents.defaults.should_warn_deprecated_memory_window: + console.print( + "[yellow]Hint:[/yellow] Detected deprecated `memoryWindow` without " + "`contextWindowTokens`. `memoryWindow` is ignored; run " + "[cyan]nanobot onboard[/cyan] to refresh your config template." 
+        )
+
+
 # ============================================================================
 # Gateway / Server
 # ============================================================================
@@ -310,6 +322,7 @@ def gateway(
         logging.basicConfig(level=logging.DEBUG)
 
     config = _load_runtime_config(config, workspace)
+    _print_deprecated_memory_window_notice(config)
     port = port if port is not None else config.gateway.port
 
     console.print(f"{__logo__} Starting nanobot gateway on port {port}...")
@@ -331,8 +344,8 @@ def gateway(
         temperature=config.agents.defaults.temperature,
         max_tokens=config.agents.defaults.max_tokens,
         max_iterations=config.agents.defaults.max_tool_iterations,
-        memory_window=config.agents.defaults.memory_window,
         reasoning_effort=config.agents.defaults.reasoning_effort,
+        context_window_tokens=config.agents.defaults.context_window_tokens,
         brave_api_key=config.tools.web.search.api_key or None,
         web_proxy=config.tools.web.proxy or None,
         exec_config=config.tools.exec,
@@ -494,6 +507,7 @@ def agent(
     from nanobot.cron.service import CronService
 
     config = _load_runtime_config(config, workspace)
+    _print_deprecated_memory_window_notice(config)
     sync_workspace_templates(config.workspace_path)
 
     bus = MessageBus()
@@ -516,8 +530,8 @@ def agent(
         temperature=config.agents.defaults.temperature,
         max_tokens=config.agents.defaults.max_tokens,
         max_iterations=config.agents.defaults.max_tool_iterations,
-        memory_window=config.agents.defaults.memory_window,
         reasoning_effort=config.agents.defaults.reasoning_effort,
+        context_window_tokens=config.agents.defaults.context_window_tokens,
         brave_api_key=config.tools.web.search.api_key or None,
         web_proxy=config.tools.web.proxy or None,
         exec_config=config.tools.exec,
diff --git a/nanobot/config/schema.py b/nanobot/config/schema.py
index 8cfcad6..a2de239 100644
--- a/nanobot/config/schema.py
+++ b/nanobot/config/schema.py
@@ -228,11 +228,18 @@ class AgentDefaults(Base):
         "auto"  # Provider name (e.g. "anthropic", "openrouter") or "auto" for auto-detection
     )
     max_tokens: int = 8192
+    context_window_tokens: int = 65_536
     temperature: float = 0.1
     max_tool_iterations: int = 40
-    memory_window: int = 100
+    # Deprecated compatibility field: accepted from old configs but ignored at runtime.
+ memory_window: int | None = Field(default=None, exclude=True) reasoning_effort: str | None = None # low / medium / high — enables LLM thinking mode + @property + def should_warn_deprecated_memory_window(self) -> bool: + """Return True when old memoryWindow is present without contextWindowTokens.""" + return self.memory_window is not None and "context_window_tokens" not in self.model_fields_set + class AgentsConfig(Base): """Agent configuration.""" diff --git a/nanobot/utils/helpers.py b/nanobot/utils/helpers.py index 57c60dc..9242ba6 100644 --- a/nanobot/utils/helpers.py +++ b/nanobot/utils/helpers.py @@ -1,8 +1,12 @@ """Utility functions for nanobot.""" +import json import re from datetime import datetime from pathlib import Path +from typing import Any + +import tiktoken def detect_image_mime(data: bytes) -> str | None: @@ -68,6 +72,87 @@ def split_message(content: str, max_len: int = 2000) -> list[str]: return chunks +def estimate_prompt_tokens( + messages: list[dict[str, Any]], + tools: list[dict[str, Any]] | None = None, +) -> int: + """Estimate prompt tokens with tiktoken.""" + try: + enc = tiktoken.get_encoding("cl100k_base") + parts: list[str] = [] + for msg in messages: + content = msg.get("content") + if isinstance(content, str): + parts.append(content) + elif isinstance(content, list): + for part in content: + if isinstance(part, dict) and part.get("type") == "text": + txt = part.get("text", "") + if txt: + parts.append(txt) + if tools: + parts.append(json.dumps(tools, ensure_ascii=False)) + return len(enc.encode("\n".join(parts))) + except Exception: + return 0 + + +def estimate_message_tokens(message: dict[str, Any]) -> int: + """Estimate prompt tokens contributed by one persisted message.""" + content = message.get("content") + parts: list[str] = [] + if isinstance(content, str): + parts.append(content) + elif isinstance(content, list): + for part in content: + if isinstance(part, dict) and part.get("type") == "text": + text = part.get("text", "") + if text: + parts.append(text) + else: + parts.append(json.dumps(part, ensure_ascii=False)) + elif content is not None: + parts.append(json.dumps(content, ensure_ascii=False)) + + for key in ("name", "tool_call_id"): + value = message.get(key) + if isinstance(value, str) and value: + parts.append(value) + if message.get("tool_calls"): + parts.append(json.dumps(message["tool_calls"], ensure_ascii=False)) + + payload = "\n".join(parts) + if not payload: + return 1 + try: + enc = tiktoken.get_encoding("cl100k_base") + return max(1, len(enc.encode(payload))) + except Exception: + return max(1, len(payload) // 4) + + +def estimate_prompt_tokens_chain( + provider: Any, + model: str | None, + messages: list[dict[str, Any]], + tools: list[dict[str, Any]] | None = None, +) -> tuple[int, str]: + """Estimate prompt tokens via provider counter first, then tiktoken fallback.""" + provider_counter = getattr(provider, "estimate_prompt_tokens", None) + if callable(provider_counter): + try: + tokens, source = provider_counter(messages, tools, model) + if isinstance(tokens, (int, float)) and tokens > 0: + return int(tokens), str(source or "provider_counter") + except Exception: + pass + + estimated = estimate_prompt_tokens(messages, tools) + if estimated > 0: + return int(estimated), "tiktoken" + return 0, "none" + + def sync_workspace_templates(workspace: Path, silent: bool = False) -> list[str]: """Sync bundled templates to workspace. 
Only creates missing files.""" from importlib.resources import files as pkg_files diff --git a/pyproject.toml b/pyproject.toml index 62cf616..4984b03 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,7 @@ classifiers = [ dependencies = [ "typer>=0.20.0,<1.0.0", - "litellm>=1.81.5,<2.0.0", + "litellm>=1.82.1,<2.0.0", "pydantic>=2.12.0,<3.0.0", "pydantic-settings>=2.12.0,<3.0.0", "websockets>=16.0,<17.0", @@ -44,6 +44,7 @@ dependencies = [ "json-repair>=0.57.0,<1.0.0", "chardet>=3.0.2,<6.0.0", "openai>=2.8.0", + "tiktoken>=0.12.0,<1.0.0", ] [project.optional-dependencies] diff --git a/tests/test_commands.py b/tests/test_commands.py index 5e3760a..1375a3a 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -267,6 +267,16 @@ def test_agent_workspace_override_wins_over_config_workspace(mock_agent_runtime, assert mock_agent_runtime["agent_loop_cls"].call_args.kwargs["workspace"] == workspace_path +def test_agent_warns_about_deprecated_memory_window(mock_agent_runtime): + mock_agent_runtime["config"].agents.defaults.memory_window = 100 + + result = runner.invoke(app, ["agent", "-m", "hello"]) + + assert result.exit_code == 0 + assert "memoryWindow" in result.stdout + assert "contextWindowTokens" in result.stdout + + def test_gateway_uses_workspace_from_config_by_default(monkeypatch, tmp_path: Path) -> None: config_file = tmp_path / "instance" / "config.json" config_file.parent.mkdir(parents=True) @@ -327,6 +337,29 @@ def test_gateway_workspace_option_overrides_config(monkeypatch, tmp_path: Path) assert seen["workspace"] == override assert config.workspace_path == override + +def test_gateway_warns_about_deprecated_memory_window(monkeypatch, tmp_path: Path) -> None: + config_file = tmp_path / "instance" / "config.json" + config_file.parent.mkdir(parents=True) + config_file.write_text("{}") + + config = Config() + config.agents.defaults.memory_window = 100 + + monkeypatch.setattr("nanobot.config.loader.set_config_path", lambda _path: None) + monkeypatch.setattr("nanobot.config.loader.load_config", lambda _path=None: config) + monkeypatch.setattr("nanobot.cli.commands.sync_workspace_templates", lambda _path: None) + monkeypatch.setattr( + "nanobot.cli.commands._make_provider", + lambda _config: (_ for _ in ()).throw(_StopGateway("stop")), + ) + + result = runner.invoke(app, ["gateway", "--config", str(config_file)]) + + assert isinstance(result.exception, _StopGateway) + assert "memoryWindow" in result.stdout + assert "contextWindowTokens" in result.stdout + def test_gateway_uses_config_directory_for_cron_store(monkeypatch, tmp_path: Path) -> None: config_file = tmp_path / "instance" / "config.json" config_file.parent.mkdir(parents=True) diff --git a/tests/test_config_migration.py b/tests/test_config_migration.py new file mode 100644 index 0000000..62e601e --- /dev/null +++ b/tests/test_config_migration.py @@ -0,0 +1,88 @@ +import json + +from typer.testing import CliRunner + +from nanobot.cli.commands import app +from nanobot.config.loader import load_config, save_config + +runner = CliRunner() + + +def test_load_config_keeps_max_tokens_and_warns_on_legacy_memory_window(tmp_path) -> None: + config_path = tmp_path / "config.json" + config_path.write_text( + json.dumps( + { + "agents": { + "defaults": { + "maxTokens": 1234, + "memoryWindow": 42, + } + } + } + ), + encoding="utf-8", + ) + + config = load_config(config_path) + + assert config.agents.defaults.max_tokens == 1234 + assert config.agents.defaults.context_window_tokens == 65_536 + assert 
config.agents.defaults.should_warn_deprecated_memory_window is True + + +def test_save_config_writes_context_window_tokens_but_not_memory_window(tmp_path) -> None: + config_path = tmp_path / "config.json" + config_path.write_text( + json.dumps( + { + "agents": { + "defaults": { + "maxTokens": 2222, + "memoryWindow": 30, + } + } + } + ), + encoding="utf-8", + ) + + config = load_config(config_path) + save_config(config, config_path) + saved = json.loads(config_path.read_text(encoding="utf-8")) + defaults = saved["agents"]["defaults"] + + assert defaults["maxTokens"] == 2222 + assert defaults["contextWindowTokens"] == 65_536 + assert "memoryWindow" not in defaults + + +def test_onboard_refresh_rewrites_legacy_config_template(tmp_path, monkeypatch) -> None: + config_path = tmp_path / "config.json" + workspace = tmp_path / "workspace" + config_path.write_text( + json.dumps( + { + "agents": { + "defaults": { + "maxTokens": 3333, + "memoryWindow": 50, + } + } + } + ), + encoding="utf-8", + ) + + monkeypatch.setattr("nanobot.config.loader.get_config_path", lambda: config_path) + monkeypatch.setattr("nanobot.cli.commands.get_workspace_path", lambda: workspace) + + result = runner.invoke(app, ["onboard"], input="n\n") + + assert result.exit_code == 0 + assert "contextWindowTokens" in result.stdout + saved = json.loads(config_path.read_text(encoding="utf-8")) + defaults = saved["agents"]["defaults"] + assert defaults["maxTokens"] == 3333 + assert defaults["contextWindowTokens"] == 65_536 + assert "memoryWindow" not in defaults diff --git a/tests/test_consolidate_offset.py b/tests/test_consolidate_offset.py index a3213dd..7d12338 100644 --- a/tests/test_consolidate_offset.py +++ b/tests/test_consolidate_offset.py @@ -480,226 +480,35 @@ class TestEmptyAndBoundarySessions: assert_messages_content(old_messages, 10, 34) -class TestConsolidationDeduplicationGuard: - """Test that consolidation tasks are deduplicated and serialized.""" +class TestNewCommandArchival: + """Test /new archival behavior with the simplified consolidation flow.""" - @pytest.mark.asyncio - async def test_consolidation_guard_prevents_duplicate_tasks(self, tmp_path: Path) -> None: - """Concurrent messages above memory_window spawn only one consolidation task.""" + @staticmethod + def _make_loop(tmp_path: Path): from nanobot.agent.loop import AgentLoop - from nanobot.bus.events import InboundMessage from nanobot.bus.queue import MessageBus from nanobot.providers.base import LLMResponse bus = MessageBus() provider = MagicMock() provider.get_default_model.return_value = "test-model" + provider.estimate_prompt_tokens.return_value = (10_000, "test") loop = AgentLoop( - bus=bus, provider=provider, workspace=tmp_path, model="test-model", memory_window=10 + bus=bus, + provider=provider, + workspace=tmp_path, + model="test-model", + context_window_tokens=1, ) - - loop.provider.chat = AsyncMock(return_value=LLMResponse(content="ok", tool_calls=[])) + loop.provider.chat_with_retry = AsyncMock(return_value=LLMResponse(content="ok", tool_calls=[])) loop.tools.get_definitions = MagicMock(return_value=[]) - - session = loop.sessions.get_or_create("cli:test") - for i in range(15): - session.add_message("user", f"msg{i}") - session.add_message("assistant", f"resp{i}") - loop.sessions.save(session) - - consolidation_calls = 0 - - async def _fake_consolidate(_session, archive_all: bool = False) -> None: - nonlocal consolidation_calls - consolidation_calls += 1 - await asyncio.sleep(0.05) - - loop._consolidate_memory = _fake_consolidate # type: 
ignore[method-assign] - - msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="hello") - await loop._process_message(msg) - await loop._process_message(msg) - await asyncio.sleep(0.1) - - assert consolidation_calls == 1, ( - f"Expected exactly 1 consolidation, got {consolidation_calls}" - ) - - @pytest.mark.asyncio - async def test_new_command_guard_prevents_concurrent_consolidation( - self, tmp_path: Path - ) -> None: - """/new command does not run consolidation concurrently with in-flight consolidation.""" - from nanobot.agent.loop import AgentLoop - from nanobot.bus.events import InboundMessage - from nanobot.bus.queue import MessageBus - from nanobot.providers.base import LLMResponse - - bus = MessageBus() - provider = MagicMock() - provider.get_default_model.return_value = "test-model" - loop = AgentLoop( - bus=bus, provider=provider, workspace=tmp_path, model="test-model", memory_window=10 - ) - - loop.provider.chat = AsyncMock(return_value=LLMResponse(content="ok", tool_calls=[])) - loop.tools.get_definitions = MagicMock(return_value=[]) - - session = loop.sessions.get_or_create("cli:test") - for i in range(15): - session.add_message("user", f"msg{i}") - session.add_message("assistant", f"resp{i}") - loop.sessions.save(session) - - consolidation_calls = 0 - active = 0 - max_active = 0 - - async def _fake_consolidate(_session, archive_all: bool = False) -> None: - nonlocal consolidation_calls, active, max_active - consolidation_calls += 1 - active += 1 - max_active = max(max_active, active) - await asyncio.sleep(0.05) - active -= 1 - - loop._consolidate_memory = _fake_consolidate # type: ignore[method-assign] - - msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="hello") - await loop._process_message(msg) - - new_msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="/new") - await loop._process_message(new_msg) - await asyncio.sleep(0.1) - - assert consolidation_calls == 2, ( - f"Expected normal + /new consolidations, got {consolidation_calls}" - ) - assert max_active == 1, ( - f"Expected serialized consolidation, observed concurrency={max_active}" - ) - - @pytest.mark.asyncio - async def test_consolidation_tasks_are_referenced(self, tmp_path: Path) -> None: - """create_task results are tracked in _consolidation_tasks while in flight.""" - from nanobot.agent.loop import AgentLoop - from nanobot.bus.events import InboundMessage - from nanobot.bus.queue import MessageBus - from nanobot.providers.base import LLMResponse - - bus = MessageBus() - provider = MagicMock() - provider.get_default_model.return_value = "test-model" - loop = AgentLoop( - bus=bus, provider=provider, workspace=tmp_path, model="test-model", memory_window=10 - ) - - loop.provider.chat = AsyncMock(return_value=LLMResponse(content="ok", tool_calls=[])) - loop.tools.get_definitions = MagicMock(return_value=[]) - - session = loop.sessions.get_or_create("cli:test") - for i in range(15): - session.add_message("user", f"msg{i}") - session.add_message("assistant", f"resp{i}") - loop.sessions.save(session) - - started = asyncio.Event() - - async def _slow_consolidate(_session, archive_all: bool = False) -> None: - started.set() - await asyncio.sleep(0.1) - - loop._consolidate_memory = _slow_consolidate # type: ignore[method-assign] - - msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="hello") - await loop._process_message(msg) - - await started.wait() - assert len(loop._consolidation_tasks) == 1, "Task must be 
referenced while in-flight" - - await asyncio.sleep(0.15) - assert len(loop._consolidation_tasks) == 0, ( - "Task reference must be removed after completion" - ) - - @pytest.mark.asyncio - async def test_new_waits_for_inflight_consolidation_and_preserves_messages( - self, tmp_path: Path - ) -> None: - """/new waits for in-flight consolidation and archives before clear.""" - from nanobot.agent.loop import AgentLoop - from nanobot.bus.events import InboundMessage - from nanobot.bus.queue import MessageBus - from nanobot.providers.base import LLMResponse - - bus = MessageBus() - provider = MagicMock() - provider.get_default_model.return_value = "test-model" - loop = AgentLoop( - bus=bus, provider=provider, workspace=tmp_path, model="test-model", memory_window=10 - ) - - loop.provider.chat = AsyncMock(return_value=LLMResponse(content="ok", tool_calls=[])) - loop.tools.get_definitions = MagicMock(return_value=[]) - - session = loop.sessions.get_or_create("cli:test") - for i in range(15): - session.add_message("user", f"msg{i}") - session.add_message("assistant", f"resp{i}") - loop.sessions.save(session) - - started = asyncio.Event() - release = asyncio.Event() - archived_count = 0 - - async def _fake_consolidate(sess, archive_all: bool = False) -> bool: - nonlocal archived_count - if archive_all: - archived_count = len(sess.messages) - return True - started.set() - await release.wait() - return True - - loop._consolidate_memory = _fake_consolidate # type: ignore[method-assign] - - msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="hello") - await loop._process_message(msg) - await started.wait() - - new_msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="/new") - pending_new = asyncio.create_task(loop._process_message(new_msg)) - - await asyncio.sleep(0.02) - assert not pending_new.done(), "/new should wait while consolidation is in-flight" - - release.set() - response = await pending_new - assert response is not None - assert "new session started" in response.content.lower() - assert archived_count > 0, "Expected /new archival to process a non-empty snapshot" - - session_after = loop.sessions.get_or_create("cli:test") - assert session_after.messages == [], "Session should be cleared after successful archival" + return loop @pytest.mark.asyncio async def test_new_does_not_clear_session_when_archive_fails(self, tmp_path: Path) -> None: - """/new must keep session data if archive step reports failure.""" - from nanobot.agent.loop import AgentLoop from nanobot.bus.events import InboundMessage - from nanobot.bus.queue import MessageBus - from nanobot.providers.base import LLMResponse - - bus = MessageBus() - provider = MagicMock() - provider.get_default_model.return_value = "test-model" - loop = AgentLoop( - bus=bus, provider=provider, workspace=tmp_path, model="test-model", memory_window=10 - ) - - loop.provider.chat = AsyncMock(return_value=LLMResponse(content="ok", tool_calls=[])) - loop.tools.get_definitions = MagicMock(return_value=[]) + loop = self._make_loop(tmp_path) session = loop.sessions.get_or_create("cli:test") for i in range(5): session.add_message("user", f"msg{i}") @@ -707,111 +516,61 @@ class TestConsolidationDeduplicationGuard: loop.sessions.save(session) before_count = len(session.messages) - async def _failing_consolidate(sess, archive_all: bool = False) -> bool: - if archive_all: - return False - return True + async def _failing_consolidate(_messages) -> bool: + return False - loop._consolidate_memory = _failing_consolidate 
# type: ignore[method-assign] + loop.memory_consolidator.consolidate_messages = _failing_consolidate # type: ignore[method-assign] new_msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="/new") response = await loop._process_message(new_msg) assert response is not None assert "failed" in response.content.lower() - session_after = loop.sessions.get_or_create("cli:test") - assert len(session_after.messages) == before_count, ( - "Session must remain intact when /new archival fails" - ) + assert len(loop.sessions.get_or_create("cli:test").messages) == before_count @pytest.mark.asyncio - async def test_new_archives_only_unconsolidated_messages_after_inflight_task( - self, tmp_path: Path - ) -> None: - """/new should archive only messages not yet consolidated by prior task.""" - from nanobot.agent.loop import AgentLoop + async def test_new_archives_only_unconsolidated_messages(self, tmp_path: Path) -> None: from nanobot.bus.events import InboundMessage - from nanobot.bus.queue import MessageBus - from nanobot.providers.base import LLMResponse - - bus = MessageBus() - provider = MagicMock() - provider.get_default_model.return_value = "test-model" - loop = AgentLoop( - bus=bus, provider=provider, workspace=tmp_path, model="test-model", memory_window=10 - ) - - loop.provider.chat = AsyncMock(return_value=LLMResponse(content="ok", tool_calls=[])) - loop.tools.get_definitions = MagicMock(return_value=[]) + loop = self._make_loop(tmp_path) session = loop.sessions.get_or_create("cli:test") for i in range(15): session.add_message("user", f"msg{i}") session.add_message("assistant", f"resp{i}") + session.last_consolidated = len(session.messages) - 3 loop.sessions.save(session) - started = asyncio.Event() - release = asyncio.Event() archived_count = -1 - async def _fake_consolidate(sess, archive_all: bool = False) -> bool: + async def _fake_consolidate(messages) -> bool: nonlocal archived_count - if archive_all: - archived_count = len(sess.messages) - return True - - started.set() - await release.wait() - sess.last_consolidated = len(sess.messages) - 3 + archived_count = len(messages) return True - loop._consolidate_memory = _fake_consolidate # type: ignore[method-assign] - - msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="hello") - await loop._process_message(msg) - await started.wait() + loop.memory_consolidator.consolidate_messages = _fake_consolidate # type: ignore[method-assign] new_msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="/new") - pending_new = asyncio.create_task(loop._process_message(new_msg)) - await asyncio.sleep(0.02) - assert not pending_new.done() - - release.set() - response = await pending_new + response = await loop._process_message(new_msg) assert response is not None assert "new session started" in response.content.lower() - assert archived_count == 3, ( - f"Expected only unconsolidated tail to archive, got {archived_count}" - ) + assert archived_count == 3 @pytest.mark.asyncio async def test_new_clears_session_and_responds(self, tmp_path: Path) -> None: - """/new clears session and returns confirmation.""" - from nanobot.agent.loop import AgentLoop from nanobot.bus.events import InboundMessage - from nanobot.bus.queue import MessageBus - from nanobot.providers.base import LLMResponse - - bus = MessageBus() - provider = MagicMock() - provider.get_default_model.return_value = "test-model" - loop = AgentLoop( - bus=bus, provider=provider, workspace=tmp_path, model="test-model", memory_window=10 - 
) - loop.provider.chat = AsyncMock(return_value=LLMResponse(content="ok", tool_calls=[])) - loop.tools.get_definitions = MagicMock(return_value=[]) + loop = self._make_loop(tmp_path) session = loop.sessions.get_or_create("cli:test") for i in range(3): session.add_message("user", f"msg{i}") session.add_message("assistant", f"resp{i}") loop.sessions.save(session) - async def _ok_consolidate(sess, archive_all: bool = False) -> bool: + async def _ok_consolidate(_messages) -> bool: return True - loop._consolidate_memory = _ok_consolidate # type: ignore[method-assign] + loop.memory_consolidator.consolidate_messages = _ok_consolidate # type: ignore[method-assign] new_msg = InboundMessage(channel="cli", sender_id="user", chat_id="test", content="/new") response = await loop._process_message(new_msg) diff --git a/tests/test_loop_consolidation_tokens.py b/tests/test_loop_consolidation_tokens.py new file mode 100644 index 0000000..b0f3dda --- /dev/null +++ b/tests/test_loop_consolidation_tokens.py @@ -0,0 +1,190 @@ +from unittest.mock import AsyncMock, MagicMock + +import pytest + +from nanobot.agent.loop import AgentLoop +import nanobot.agent.memory as memory_module +from nanobot.bus.queue import MessageBus +from nanobot.providers.base import LLMResponse + + +def _make_loop(tmp_path, *, estimated_tokens: int, context_window_tokens: int) -> AgentLoop: + provider = MagicMock() + provider.get_default_model.return_value = "test-model" + provider.estimate_prompt_tokens.return_value = (estimated_tokens, "test-counter") + provider.chat_with_retry = AsyncMock(return_value=LLMResponse(content="ok", tool_calls=[])) + + loop = AgentLoop( + bus=MessageBus(), + provider=provider, + workspace=tmp_path, + model="test-model", + context_window_tokens=context_window_tokens, + ) + loop.tools.get_definitions = MagicMock(return_value=[]) + return loop + + +@pytest.mark.asyncio +async def test_prompt_below_threshold_does_not_consolidate(tmp_path) -> None: + loop = _make_loop(tmp_path, estimated_tokens=100, context_window_tokens=200) + loop.memory_consolidator.consolidate_messages = AsyncMock(return_value=True) # type: ignore[method-assign] + + await loop.process_direct("hello", session_key="cli:test") + + loop.memory_consolidator.consolidate_messages.assert_not_awaited() + + +@pytest.mark.asyncio +async def test_prompt_above_threshold_triggers_consolidation(tmp_path, monkeypatch) -> None: + loop = _make_loop(tmp_path, estimated_tokens=1000, context_window_tokens=200) + loop.memory_consolidator.consolidate_messages = AsyncMock(return_value=True) # type: ignore[method-assign] + session = loop.sessions.get_or_create("cli:test") + session.messages = [ + {"role": "user", "content": "u1", "timestamp": "2026-01-01T00:00:00"}, + {"role": "assistant", "content": "a1", "timestamp": "2026-01-01T00:00:01"}, + {"role": "user", "content": "u2", "timestamp": "2026-01-01T00:00:02"}, + ] + loop.sessions.save(session) + monkeypatch.setattr(memory_module, "estimate_message_tokens", lambda _message: 500) + + await loop.process_direct("hello", session_key="cli:test") + + assert loop.memory_consolidator.consolidate_messages.await_count >= 1 + + +@pytest.mark.asyncio +async def test_prompt_above_threshold_archives_until_next_user_boundary(tmp_path, monkeypatch) -> None: + loop = _make_loop(tmp_path, estimated_tokens=1000, context_window_tokens=200) + loop.memory_consolidator.consolidate_messages = AsyncMock(return_value=True) # type: ignore[method-assign] + + session = loop.sessions.get_or_create("cli:test") + session.messages = [ + {"role": 
"user", "content": "u1", "timestamp": "2026-01-01T00:00:00"}, + {"role": "assistant", "content": "a1", "timestamp": "2026-01-01T00:00:01"}, + {"role": "user", "content": "u2", "timestamp": "2026-01-01T00:00:02"}, + {"role": "assistant", "content": "a2", "timestamp": "2026-01-01T00:00:03"}, + {"role": "user", "content": "u3", "timestamp": "2026-01-01T00:00:04"}, + ] + loop.sessions.save(session) + + token_map = {"u1": 120, "a1": 120, "u2": 120, "a2": 120, "u3": 120} + monkeypatch.setattr(memory_module, "estimate_message_tokens", lambda message: token_map[message["content"]]) + + await loop.memory_consolidator.maybe_consolidate_by_tokens(session) + + archived_chunk = loop.memory_consolidator.consolidate_messages.await_args.args[0] + assert [message["content"] for message in archived_chunk] == ["u1", "a1", "u2", "a2"] + assert session.last_consolidated == 4 + + +@pytest.mark.asyncio +async def test_consolidation_loops_until_target_met(tmp_path, monkeypatch) -> None: + """Verify maybe_consolidate_by_tokens keeps looping until under threshold.""" + loop = _make_loop(tmp_path, estimated_tokens=0, context_window_tokens=200) + loop.memory_consolidator.consolidate_messages = AsyncMock(return_value=True) # type: ignore[method-assign] + + session = loop.sessions.get_or_create("cli:test") + session.messages = [ + {"role": "user", "content": "u1", "timestamp": "2026-01-01T00:00:00"}, + {"role": "assistant", "content": "a1", "timestamp": "2026-01-01T00:00:01"}, + {"role": "user", "content": "u2", "timestamp": "2026-01-01T00:00:02"}, + {"role": "assistant", "content": "a2", "timestamp": "2026-01-01T00:00:03"}, + {"role": "user", "content": "u3", "timestamp": "2026-01-01T00:00:04"}, + {"role": "assistant", "content": "a3", "timestamp": "2026-01-01T00:00:05"}, + {"role": "user", "content": "u4", "timestamp": "2026-01-01T00:00:06"}, + ] + loop.sessions.save(session) + + call_count = [0] + def mock_estimate(_session): + call_count[0] += 1 + if call_count[0] == 1: + return (500, "test") + if call_count[0] == 2: + return (300, "test") + return (80, "test") + + loop.memory_consolidator.estimate_session_prompt_tokens = mock_estimate # type: ignore[method-assign] + monkeypatch.setattr(memory_module, "estimate_message_tokens", lambda _m: 100) + + await loop.memory_consolidator.maybe_consolidate_by_tokens(session) + + assert loop.memory_consolidator.consolidate_messages.await_count == 2 + assert session.last_consolidated == 6 + + +@pytest.mark.asyncio +async def test_consolidation_continues_below_trigger_until_half_target(tmp_path, monkeypatch) -> None: + """Once triggered, consolidation should continue until it drops below half threshold.""" + loop = _make_loop(tmp_path, estimated_tokens=0, context_window_tokens=200) + loop.memory_consolidator.consolidate_messages = AsyncMock(return_value=True) # type: ignore[method-assign] + + session = loop.sessions.get_or_create("cli:test") + session.messages = [ + {"role": "user", "content": "u1", "timestamp": "2026-01-01T00:00:00"}, + {"role": "assistant", "content": "a1", "timestamp": "2026-01-01T00:00:01"}, + {"role": "user", "content": "u2", "timestamp": "2026-01-01T00:00:02"}, + {"role": "assistant", "content": "a2", "timestamp": "2026-01-01T00:00:03"}, + {"role": "user", "content": "u3", "timestamp": "2026-01-01T00:00:04"}, + {"role": "assistant", "content": "a3", "timestamp": "2026-01-01T00:00:05"}, + {"role": "user", "content": "u4", "timestamp": "2026-01-01T00:00:06"}, + ] + loop.sessions.save(session) + + call_count = [0] + + def mock_estimate(_session): + 
call_count[0] += 1 + if call_count[0] == 1: + return (500, "test") + if call_count[0] == 2: + return (150, "test") + return (80, "test") + + loop.memory_consolidator.estimate_session_prompt_tokens = mock_estimate # type: ignore[method-assign] + monkeypatch.setattr(memory_module, "estimate_message_tokens", lambda _m: 100) + + await loop.memory_consolidator.maybe_consolidate_by_tokens(session) + + assert loop.memory_consolidator.consolidate_messages.await_count == 2 + assert session.last_consolidated == 6 + + +@pytest.mark.asyncio +async def test_preflight_consolidation_before_llm_call(tmp_path, monkeypatch) -> None: + """Verify preflight consolidation runs before the LLM call in process_direct.""" + order: list[str] = [] + + loop = _make_loop(tmp_path, estimated_tokens=0, context_window_tokens=200) + + async def track_consolidate(messages): + order.append("consolidate") + return True + loop.memory_consolidator.consolidate_messages = track_consolidate # type: ignore[method-assign] + + async def track_llm(*args, **kwargs): + order.append("llm") + return LLMResponse(content="ok", tool_calls=[]) + loop.provider.chat_with_retry = track_llm + + session = loop.sessions.get_or_create("cli:test") + session.messages = [ + {"role": "user", "content": "u1", "timestamp": "2026-01-01T00:00:00"}, + {"role": "assistant", "content": "a1", "timestamp": "2026-01-01T00:00:01"}, + {"role": "user", "content": "u2", "timestamp": "2026-01-01T00:00:02"}, + ] + loop.sessions.save(session) + monkeypatch.setattr(memory_module, "estimate_message_tokens", lambda _m: 500) + + call_count = [0] + def mock_estimate(_session): + call_count[0] += 1 + return (1000 if call_count[0] <= 1 else 80, "test") + loop.memory_consolidator.estimate_session_prompt_tokens = mock_estimate # type: ignore[method-assign] + + await loop.process_direct("hello", session_key="cli:test") + + assert "consolidate" in order + assert "llm" in order + assert order.index("consolidate") < order.index("llm") diff --git a/tests/test_memory_consolidation_types.py b/tests/test_memory_consolidation_types.py index 2605bf7..0263f01 100644 --- a/tests/test_memory_consolidation_types.py +++ b/tests/test_memory_consolidation_types.py @@ -7,7 +7,7 @@ tool call response, it should serialize them to JSON instead of raising TypeErro import json from pathlib import Path -from unittest.mock import AsyncMock, MagicMock +from unittest.mock import AsyncMock import pytest @@ -15,15 +15,12 @@ from nanobot.agent.memory import MemoryStore from nanobot.providers.base import LLMProvider, LLMResponse, ToolCallRequest -def _make_session(message_count: int = 30, memory_window: int = 50): - """Create a mock session with messages.""" - session = MagicMock() - session.messages = [ +def _make_messages(message_count: int = 30): + """Create a list of mock messages.""" + return [ {"role": "user", "content": f"msg{i}", "timestamp": "2026-01-01 00:00"} for i in range(message_count) ] - session.last_consolidated = 0 - return session def _make_tool_response(history_entry, memory_update): @@ -74,9 +71,9 @@ class TestMemoryConsolidationTypeHandling: ) ) provider.chat_with_retry = provider.chat - session = _make_session(message_count=60) + messages = _make_messages(message_count=60) - result = await store.consolidate(session, provider, "test-model", memory_window=50) + result = await store.consolidate(messages, provider, "test-model") assert result is True assert store.history_file.exists() @@ -95,9 +92,9 @@ class TestMemoryConsolidationTypeHandling: ) ) provider.chat_with_retry = provider.chat - 
session = _make_session(message_count=60) + messages = _make_messages(message_count=60) - result = await store.consolidate(session, provider, "test-model", memory_window=50) + result = await store.consolidate(messages, provider, "test-model") assert result is True assert store.history_file.exists() @@ -131,9 +128,9 @@ class TestMemoryConsolidationTypeHandling: ) provider.chat = AsyncMock(return_value=response) provider.chat_with_retry = provider.chat - session = _make_session(message_count=60) + messages = _make_messages(message_count=60) - result = await store.consolidate(session, provider, "test-model", memory_window=50) + result = await store.consolidate(messages, provider, "test-model") assert result is True assert "User discussed testing." in store.history_file.read_text() @@ -147,22 +144,22 @@ class TestMemoryConsolidationTypeHandling: return_value=LLMResponse(content="I summarized the conversation.", tool_calls=[]) ) provider.chat_with_retry = provider.chat - session = _make_session(message_count=60) + messages = _make_messages(message_count=60) - result = await store.consolidate(session, provider, "test-model", memory_window=50) + result = await store.consolidate(messages, provider, "test-model") assert result is False assert not store.history_file.exists() @pytest.mark.asyncio - async def test_skips_when_few_messages(self, tmp_path: Path) -> None: - """Consolidation should be a no-op when messages < keep_count.""" + async def test_skips_when_message_chunk_is_empty(self, tmp_path: Path) -> None: + """Consolidation should be a no-op when the selected chunk is empty.""" store = MemoryStore(tmp_path) provider = AsyncMock() provider.chat_with_retry = provider.chat - session = _make_session(message_count=10) + messages: list[dict] = [] - result = await store.consolidate(session, provider, "test-model", memory_window=50) + result = await store.consolidate(messages, provider, "test-model") assert result is True provider.chat.assert_not_called() @@ -189,9 +186,9 @@ class TestMemoryConsolidationTypeHandling: ) provider.chat = AsyncMock(return_value=response) provider.chat_with_retry = provider.chat - session = _make_session(message_count=60) + messages = _make_messages(message_count=60) - result = await store.consolidate(session, provider, "test-model", memory_window=50) + result = await store.consolidate(messages, provider, "test-model") assert result is True assert "User discussed testing." 
in store.history_file.read_text() @@ -215,9 +212,9 @@ class TestMemoryConsolidationTypeHandling: ) provider.chat = AsyncMock(return_value=response) provider.chat_with_retry = provider.chat - session = _make_session(message_count=60) + messages = _make_messages(message_count=60) - result = await store.consolidate(session, provider, "test-model", memory_window=50) + result = await store.consolidate(messages, provider, "test-model") assert result is False @@ -239,9 +236,9 @@ class TestMemoryConsolidationTypeHandling: ) provider.chat = AsyncMock(return_value=response) provider.chat_with_retry = provider.chat - session = _make_session(message_count=60) + messages = _make_messages(message_count=60) - result = await store.consolidate(session, provider, "test-model", memory_window=50) + result = await store.consolidate(messages, provider, "test-model") assert result is False @@ -255,7 +252,7 @@ class TestMemoryConsolidationTypeHandling: memory_update="# Memory\nUser likes testing.", ), ]) - session = _make_session(message_count=60) + messages = _make_messages(message_count=60) delays: list[int] = [] async def _fake_sleep(delay: int) -> None: @@ -263,7 +260,7 @@ class TestMemoryConsolidationTypeHandling: monkeypatch.setattr("nanobot.providers.base.asyncio.sleep", _fake_sleep) - result = await store.consolidate(session, provider, "test-model", memory_window=50) + result = await store.consolidate(messages, provider, "test-model") assert result is True assert provider.calls == 2 diff --git a/tests/test_message_tool_suppress.py b/tests/test_message_tool_suppress.py index 63b0fd1..1091de4 100644 --- a/tests/test_message_tool_suppress.py +++ b/tests/test_message_tool_suppress.py @@ -16,7 +16,7 @@ def _make_loop(tmp_path: Path) -> AgentLoop: bus = MessageBus() provider = MagicMock() provider.get_default_model.return_value = "test-model" - return AgentLoop(bus=bus, provider=provider, workspace=tmp_path, model="test-model", memory_window=10) + return AgentLoop(bus=bus, provider=provider, workspace=tmp_path, model="test-model") class TestMessageToolSuppressLogic: @@ -33,7 +33,7 @@ class TestMessageToolSuppressLogic: LLMResponse(content="", tool_calls=[tool_call]), LLMResponse(content="Done", tool_calls=[]), ]) - loop.provider.chat = AsyncMock(side_effect=lambda *a, **kw: next(calls)) + loop.provider.chat_with_retry = AsyncMock(side_effect=lambda *a, **kw: next(calls)) loop.tools.get_definitions = MagicMock(return_value=[]) sent: list[OutboundMessage] = [] @@ -58,7 +58,7 @@ class TestMessageToolSuppressLogic: LLMResponse(content="", tool_calls=[tool_call]), LLMResponse(content="I've sent the email.", tool_calls=[]), ]) - loop.provider.chat = AsyncMock(side_effect=lambda *a, **kw: next(calls)) + loop.provider.chat_with_retry = AsyncMock(side_effect=lambda *a, **kw: next(calls)) loop.tools.get_definitions = MagicMock(return_value=[]) sent: list[OutboundMessage] = [] @@ -77,7 +77,7 @@ class TestMessageToolSuppressLogic: @pytest.mark.asyncio async def test_not_suppress_when_no_message_tool_used(self, tmp_path: Path) -> None: loop = _make_loop(tmp_path) - loop.provider.chat = AsyncMock(return_value=LLMResponse(content="Hello!", tool_calls=[])) + loop.provider.chat_with_retry = AsyncMock(return_value=LLMResponse(content="Hello!", tool_calls=[])) loop.tools.get_definitions = MagicMock(return_value=[]) msg = InboundMessage(channel="feishu", sender_id="user1", chat_id="chat123", content="Hi") @@ -98,7 +98,7 @@ class TestMessageToolSuppressLogic: ), LLMResponse(content="Done", tool_calls=[]), ]) - loop.provider.chat = 
AsyncMock(side_effect=lambda *a, **kw: next(calls)) + loop.provider.chat_with_retry = AsyncMock(side_effect=lambda *a, **kw: next(calls)) loop.tools.get_definitions = MagicMock(return_value=[]) loop.tools.execute = AsyncMock(return_value="ok")