HeartbeatService was refactored from free-text HEARTBEAT_OK token matching to a structured two-phase design (LLM tool call for skip/run decision, then execution). The tests still used the old on_heartbeat callback constructor and HEARTBEAT_OK_TOKEN import. - Remove obsolete test_heartbeat_ok_detection test - Update test_start_is_idempotent to use new provider+model constructor - Add tests for _decide() skip path, trigger_now() run/skip paths Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
118 lines
3.0 KiB
Python
118 lines
3.0 KiB
Python
import asyncio
|
|
|
|
import pytest
|
|
|
|
from nanobot.heartbeat.service import HeartbeatService
|
|
from nanobot.providers.base import LLMResponse, ToolCallRequest
|
|
|
|
|
|
class DummyProvider:
    """Stub LLM provider that replays a fixed FIFO queue of LLMResponse objects."""

    def __init__(self, responses: list[LLMResponse]):
        # Defensive copy: pop(0) below must never mutate the caller's list.
        self._responses = list(responses)

    async def chat(self, *args, **kwargs) -> LLMResponse:
        # Once the queue is drained, fall back to an empty no-tool-call reply.
        if not self._responses:
            return LLMResponse(content="", tool_calls=[])
        return self._responses.pop(0)
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_start_is_idempotent(tmp_path) -> None:
    """Calling start() a second time must not replace the running background task."""
    service = HeartbeatService(
        workspace=tmp_path,
        provider=DummyProvider([]),
        model="openai/gpt-4o-mini",
        interval_s=9999,  # effectively never fires during the test
        enabled=True,
    )

    await service.start()
    original_task = service._task
    await service.start()

    # Second start() must be a no-op: same task object, not a fresh one.
    assert service._task is original_task

    service.stop()
    # Yield to the event loop once so task cancellation can propagate.
    await asyncio.sleep(0)
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_decide_returns_skip_when_no_tool_call(tmp_path) -> None:
    """_decide() must default to ('skip', '') when the LLM emits no tool call."""
    provider = DummyProvider([LLMResponse(content="no tool call", tool_calls=[])])
    service = HeartbeatService(
        workspace=tmp_path,
        provider=provider,
        model="openai/gpt-4o-mini",
    )

    action, tasks = await service._decide("heartbeat content")

    assert (action, tasks) == ("skip", "")
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_trigger_now_executes_when_decision_is_run(tmp_path) -> None:
    """A 'run' tool-call decision must invoke on_execute with the tasks string."""
    (tmp_path / "HEARTBEAT.md").write_text("- [ ] do thing", encoding="utf-8")

    run_call = ToolCallRequest(
        id="hb_1",
        name="heartbeat",
        arguments={"action": "run", "tasks": "check open tasks"},
    )
    provider = DummyProvider([LLMResponse(content="", tool_calls=[run_call])])

    # Record every tasks payload the executor callback receives.
    executed: list[str] = []

    async def _on_execute(tasks: str) -> str:
        executed.append(tasks)
        return "done"

    service = HeartbeatService(
        workspace=tmp_path,
        provider=provider,
        model="openai/gpt-4o-mini",
        on_execute=_on_execute,
    )

    result = await service.trigger_now()

    assert result == "done"
    assert executed == ["check open tasks"]
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_trigger_now_returns_none_when_decision_is_skip(tmp_path) -> None:
    """A 'skip' tool-call decision must short-circuit trigger_now() to None."""
    (tmp_path / "HEARTBEAT.md").write_text("- [ ] do thing", encoding="utf-8")

    skip_call = ToolCallRequest(id="hb_1", name="heartbeat", arguments={"action": "skip"})
    provider = DummyProvider([LLMResponse(content="", tool_calls=[skip_call])])

    # Executor would echo its input if called; skip must prevent that entirely.
    async def _on_execute(tasks: str) -> str:
        return tasks

    service = HeartbeatService(
        workspace=tmp_path,
        provider=provider,
        model="openai/gpt-4o-mini",
        on_execute=_on_execute,
    )

    assert await service.trigger_now() is None
|