fix(agent): make status command responsive and accurate
Handle /status at the run-loop level so it can return immediately even
while the agent is busy on another message, and reset last-usage stats
to zero when a provider omits usage data instead of letting stale
numbers linger. Keep /status covered in the command handler (and the
Telegram help/menu) by delegating to a shared status builder, and have
Telegram final responses stream via draft edits while progress updates
keep using plain sends.

Made-with: Cursor
@@ -185,6 +185,47 @@ class AgentLoop:
             return f'{tc.name}("{val[:40]}…")' if len(val) > 40 else f'{tc.name}("{val}")'

         return ", ".join(_fmt(tc) for tc in tool_calls)

+    def _build_status_content(self, session: Session) -> str:
+        """Build a human-readable runtime status snapshot."""
+        history = session.get_history(max_messages=0)
+        msg_count = len(history)
+        active_subs = self.subagents.get_running_count()
+
+        uptime_s = int(time.time() - self._start_time)
+        uptime = (
+            f"{uptime_s // 3600}h {(uptime_s % 3600) // 60}m"
+            if uptime_s >= 3600
+            else f"{uptime_s // 60}m {uptime_s % 60}s"
+        )
+
+        last_in = self._last_usage.get("prompt_tokens", 0)
+        last_out = self._last_usage.get("completion_tokens", 0)
+
+        ctx_used = last_in
+        ctx_total_tokens = max(self.context_window_tokens, 0)
+        ctx_pct = int((ctx_used / ctx_total_tokens) * 100) if ctx_total_tokens > 0 else 0
+        ctx_used_str = f"{ctx_used // 1000}k" if ctx_used >= 1000 else str(ctx_used)
+        ctx_total_str = f"{ctx_total_tokens // 1024}k" if ctx_total_tokens > 0 else "n/a"
+
+        return "\n".join([
+            f"🐈 nanobot v{__version__}",
+            f"🧠 Model: {self.model}",
+            f"📊 Tokens: {last_in} in / {last_out} out",
+            f"📚 Context: {ctx_used_str}/{ctx_total_str} ({ctx_pct}%)",
+            f"💬 Session: {msg_count} messages",
+            f"👾 Subagents: {active_subs} active",
+            f"🪢 Queue: {self.bus.inbound.qsize()} pending",
+            f"⏱ Uptime: {uptime}",
+        ])
+
+    def _status_response(self, msg: InboundMessage, session: Session) -> OutboundMessage:
+        """Build an outbound status message for a session."""
+        return OutboundMessage(
+            channel=msg.channel,
+            chat_id=msg.chat_id,
+            content=self._build_status_content(session),
+        )
+
     async def _run_agent_loop(
         self,
         initial_messages: list[dict],
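The snapshot is plain integer formatting; a standalone sketch with hypothetical values (no AgentLoop instance needed) shows what the math produces. Note that the used count is scaled by 1000 while the window total is scaled by 1024, so an 8192-token window prints as 8k:

# Standalone sketch of the status math; all values here are hypothetical.
uptime_s = 5025                      # e.g. 1h 23m 45s of runtime
uptime = (
    f"{uptime_s // 3600}h {(uptime_s % 3600) // 60}m"
    if uptime_s >= 3600
    else f"{uptime_s // 60}m {uptime_s % 60}s"
)
assert uptime == "1h 23m"

ctx_used, ctx_total = 4000, 8192     # last prompt size vs. context window
ctx_pct = int((ctx_used / ctx_total) * 100)       # -> 48
ctx_used_str = f"{ctx_used // 1000}k"             # -> "4k"  (scaled by 1000)
ctx_total_str = f"{ctx_total // 1024}k"           # -> "8k"  (scaled by 1024)
print(f"📚 Context: {ctx_used_str}/{ctx_total_str} ({ctx_pct}%)")  # 4k/8k (48%)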
@@ -206,11 +247,11 @@ class AgentLoop:
                 tools=tool_defs,
                 model=self.model,
             )
-            if response.usage:
-                self._last_usage = {
-                    "prompt_tokens": int(response.usage.get("prompt_tokens", 0) or 0),
-                    "completion_tokens": int(response.usage.get("completion_tokens", 0) or 0),
-                }
+            usage = response.usage or {}
+            self._last_usage = {
+                "prompt_tokens": int(usage.get("prompt_tokens", 0) or 0),
+                "completion_tokens": int(usage.get("completion_tokens", 0) or 0),
+            }

             if response.has_tool_calls:
                 if on_progress:
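The point of this rewrite is what happens when response.usage is None: the old guard skipped the assignment and left the previous turn's numbers in _last_usage, while the new form falls back to an empty dict and zeroes both counters. A self-contained sketch of the two behaviours, with plain dicts standing in for the response object:

# Hypothetical values illustrating stale vs. reset usage stats.
last_usage = {"prompt_tokens": 1200, "completion_tokens": 300}  # from a prior turn
usage_from_provider = None  # this turn's response omitted usage data

# Old behaviour: the guard skips the update, so stale numbers survive.
if usage_from_provider:
    last_usage = dict(usage_from_provider)  # never reached this turn
assert last_usage["prompt_tokens"] == 1200  # still showing the old turn

# New behaviour: a missing usage block resets both counters to zero.
usage = usage_from_provider or {}
last_usage = {
    "prompt_tokens": int(usage.get("prompt_tokens", 0) or 0),
    "completion_tokens": int(usage.get("completion_tokens", 0) or 0),
}
assert last_usage == {"prompt_tokens": 0, "completion_tokens": 0}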
@@ -289,6 +330,9 @@ class AgentLoop:
                 await self._handle_stop(msg)
             elif cmd == "/restart":
                 await self._handle_restart(msg)
+            elif cmd == "/status":
+                session = self.sessions.get_or_create(msg.session_key)
+                await self.bus.publish_outbound(self._status_response(msg, session))
             else:
                 task = asyncio.create_task(self._dispatch(msg))
                 self._active_tasks.setdefault(msg.session_key, []).append(task)
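This branch is what makes /status responsive: it is answered inline by the run loop and published straight to the outbound bus, while ordinary messages are handed to a background task that may block on a model call for seconds. A stripped-down sketch of that shape, with plain asyncio queues standing in for the real message bus and strings standing in for message objects:

# Stripped-down sketch of the run-loop shape; queue types and message
# formats here are stand-ins, not the project's real bus.
import asyncio

async def slow_agent_work(msg: str, outbound: asyncio.Queue) -> None:
    await asyncio.sleep(5)  # stand-in for a long LLM round-trip
    await outbound.put(f"done: {msg}")

async def run_loop(inbound: asyncio.Queue, outbound: asyncio.Queue) -> None:
    while True:
        msg = await inbound.get()
        if msg == "/status":
            # Answered inline: no waiting on whatever the agent is doing.
            await outbound.put("status: ok")
        else:
            # Long-running work goes to a task; the loop keeps reading.
            asyncio.create_task(slow_agent_work(msg, outbound))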
@@ -420,41 +464,7 @@ class AgentLoop:
             return OutboundMessage(channel=msg.channel, chat_id=msg.chat_id,
                                    content="New session started.")
         if cmd == "/status":
-            history = session.get_history(max_messages=0)
-            msg_count = len(history)
-            active_subs = self.subagents.get_running_count()
-
-            uptime_s = int(time.time() - self._start_time)
-            uptime = (
-                f"{uptime_s // 3600}h {(uptime_s % 3600) // 60}m"
-                if uptime_s >= 3600
-                else f"{uptime_s // 60}m {uptime_s % 60}s"
-            )
-
-            last_in = self._last_usage.get("prompt_tokens", 0)
-            last_out = self._last_usage.get("completion_tokens", 0)
-
-            ctx_used = last_in
-            ctx_total_tokens = max(self.context_window_tokens, 0)
-            ctx_pct = int((ctx_used / ctx_total_tokens) * 100) if ctx_total_tokens > 0 else 0
-            ctx_used_str = f"{ctx_used // 1000}k" if ctx_used >= 1000 else str(ctx_used)
-            ctx_total_str = f"{ctx_total_tokens // 1024}k" if ctx_total_tokens > 0 else "n/a"
-
-            lines = [
-                f"🐈 nanobot v{__version__}",
-                f"🧠 Model: {self.model}",
-                f"📊 Tokens: {last_in} in / {last_out} out",
-                f"📚 Context: {ctx_used_str}/{ctx_total_str} ({ctx_pct}%)",
-                f"💬 Session: {msg_count} messages",
-                f"👾 Subagents: {active_subs} active",
-                f"🪢 Queue: {self.bus.inbound.qsize()} pending",
-                f"⏱ Uptime: {uptime}",
-            ]
-            return OutboundMessage(
-                channel=msg.channel,
-                chat_id=msg.chat_id,
-                content="\n".join(lines),
-            )
+            return self._status_response(msg, session)
         if cmd == "/help":
             lines = [
                 "🐈 nanobot commands:",
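With the inline block gone, both entry points render the snapshot through the same _status_response helper, so the run-loop fast path and the command handler cannot drift apart in what /status reports.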
@@ -419,8 +419,11 @@ class TelegramChannel(BaseChannel):
         is_progress = msg.metadata.get("_progress", False)

         for chunk in split_message(msg.content, TELEGRAM_MAX_MESSAGE_LEN):
-            # Use plain send for final responses too; draft streaming can create duplicates.
-            await self._send_text(chat_id, chunk, reply_params, thread_kwargs)
+            # Final response: simulate streaming via draft, then persist.
+            if not is_progress:
+                await self._send_with_streaming(chat_id, chunk, reply_params, thread_kwargs)
+            else:
+                await self._send_text(chat_id, chunk, reply_params, thread_kwargs)

     async def _call_with_retry(self, fn, *args, **kwargs):
         """Call an async Telegram API function with retry on pool/network timeout."""
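The _send_with_streaming helper itself is not part of this diff. Assuming python-telegram-bot's Bot.send_message and Bot.edit_message_text, a draft-style streamed send could look roughly like the sketch below; the helper name, step size, and pacing delay are illustrative, not the project's actual implementation:

# Hypothetical draft-streaming send, assuming python-telegram-bot.
import asyncio

async def send_with_streaming(bot, chat_id: int, text: str, step: int = 200) -> None:
    # Post the first slice as a draft message...
    draft = await bot.send_message(chat_id=chat_id, text=text[:step])
    # ...then grow it in place until the full text is persisted.
    for end in range(2 * step, len(text) + step, step):
        await asyncio.sleep(0.3)  # pace edits to respect Telegram rate limits
        await bot.edit_message_text(
            chat_id=chat_id,
            message_id=draft.message_id,
            text=text[:end],
        )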