From 52222a9f8475b64879d50f8925587206a3ffc774 Mon Sep 17 00:00:00 2001 From: fengxiaohu <975326527@qq.com> Date: Sat, 28 Feb 2026 18:46:15 +0800 Subject: [PATCH 01/14] fix(providers): allow reasoning_content in message history for thinking models --- nanobot/providers/litellm_provider.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nanobot/providers/litellm_provider.py b/nanobot/providers/litellm_provider.py index 7402a2b..03a6c4d 100644 --- a/nanobot/providers/litellm_provider.py +++ b/nanobot/providers/litellm_provider.py @@ -13,7 +13,7 @@ from nanobot.providers.registry import find_by_model, find_gateway # Standard OpenAI chat-completion message keys; extras (e.g. reasoning_content) are stripped for strict providers. -_ALLOWED_MSG_KEYS = frozenset({"role", "content", "tool_calls", "tool_call_id", "name"}) +_ALLOWED_MSG_KEYS = frozenset({"role", "content", "tool_calls", "tool_call_id", "name", "reasoning_content"}) class LiteLLMProvider(LLMProvider): From cfc55d626afa86a9bdf4c120d1ad8882a063244c Mon Sep 17 00:00:00 2001 From: "siyuan.qsy" Date: Sat, 28 Feb 2026 19:00:22 +0800 Subject: [PATCH 02/14] feat(dingtalk): send images as image messages, keep files as attachments --- nanobot/channels/dingtalk.py | 290 +++++++++++++++++++++++++++++++---- 1 file changed, 263 insertions(+), 27 deletions(-) diff --git a/nanobot/channels/dingtalk.py b/nanobot/channels/dingtalk.py index 09c7714..53a9bb8 100644 --- a/nanobot/channels/dingtalk.py +++ b/nanobot/channels/dingtalk.py @@ -2,8 +2,12 @@ import asyncio import json +import mimetypes +import os import time +from pathlib import Path from typing import Any +from urllib.parse import unquote, urlparse from loguru import logger import httpx @@ -96,6 +100,9 @@ class DingTalkChannel(BaseChannel): """ name = "dingtalk" + _IMAGE_EXTS = {".jpg", ".jpeg", ".png", ".gif", ".bmp", ".webp"} + _AUDIO_EXTS = {".amr", ".mp3", ".wav", ".ogg", ".m4a", ".aac"} + _VIDEO_EXTS = {".mp4", ".mov", ".avi", ".mkv", ".webm"} def 
__init__(self, config: DingTalkConfig, bus: MessageBus): super().__init__(config, bus) @@ -191,40 +198,269 @@ class DingTalkChannel(BaseChannel): logger.error("Failed to get DingTalk access token: {}", e) return None + @staticmethod + def _is_http_url(value: str) -> bool: + low = value.lower() + return low.startswith("http://") or low.startswith("https://") + + def _guess_upload_type(self, media_ref: str) -> str: + parsed = urlparse(media_ref) + path = parsed.path if parsed.scheme else media_ref + ext = Path(path).suffix.lower() + if ext in self._IMAGE_EXTS: + return "image" + if ext in self._AUDIO_EXTS: + return "voice" + if ext in self._VIDEO_EXTS: + return "video" + return "file" + + def _guess_filename(self, media_ref: str, upload_type: str) -> str: + parsed = urlparse(media_ref) + path = parsed.path if parsed.scheme else media_ref + name = os.path.basename(path) + if name: + return name + fallback = { + "image": "image.jpg", + "voice": "audio.amr", + "video": "video.mp4", + "file": "file.bin", + } + return fallback.get(upload_type, "file.bin") + + async def _read_media_bytes( + self, + media_ref: str, + ) -> tuple[bytes | None, str | None, str | None]: + if not media_ref: + return None, None, None + + if self._is_http_url(media_ref): + if not self._http: + return None, None, None + try: + resp = await self._http.get(media_ref, follow_redirects=True) + if resp.status_code >= 400: + logger.warning( + "DingTalk media download failed status={} ref={}", + resp.status_code, + media_ref, + ) + return None, None, None + content_type = (resp.headers.get("content-type") or "").split(";")[0].strip() + filename = self._guess_filename(media_ref, self._guess_upload_type(media_ref)) + return resp.content, filename, content_type or None + except Exception as e: + logger.error("DingTalk media download error ref={} err={}", media_ref, e) + return None, None, None + + try: + if media_ref.startswith("file://"): + parsed = urlparse(media_ref) + local_path = 
Path(unquote(parsed.path)) + else: + local_path = Path(os.path.expanduser(media_ref)) + if not local_path.is_file(): + logger.warning("DingTalk media file not found: {}", local_path) + return None, None, None + data = await asyncio.to_thread(local_path.read_bytes) + content_type = mimetypes.guess_type(local_path.name)[0] + return data, local_path.name, content_type + except Exception as e: + logger.error("DingTalk media read error ref={} err={}", media_ref, e) + return None, None, None + + async def _upload_media( + self, + token: str, + data: bytes, + media_type: str, + filename: str, + content_type: str | None, + ) -> str | None: + if not self._http: + return None + url = f"https://oapi.dingtalk.com/media/upload?access_token={token}&type={media_type}" + mime = content_type or mimetypes.guess_type(filename)[0] or "application/octet-stream" + files = {"media": (filename, data, mime)} + + try: + resp = await self._http.post(url, files=files) + text = resp.text + try: + result = resp.json() + except Exception: + result = {} + if resp.status_code >= 400: + logger.error( + "DingTalk media upload failed status={} type={} body={}", + resp.status_code, + media_type, + text[:500], + ) + return None + errcode = result.get("errcode", 0) + if errcode != 0: + logger.error( + "DingTalk media upload api error type={} errcode={} body={}", + media_type, + errcode, + text[:500], + ) + return None + media_id = ( + result.get("media_id") + or result.get("mediaId") + or (result.get("result") or {}).get("media_id") + or (result.get("result") or {}).get("mediaId") + ) + if not media_id: + logger.error("DingTalk media upload missing media_id body={}", text[:500]) + return None + return str(media_id) + except Exception as e: + logger.error("DingTalk media upload error type={} err={}", media_type, e) + return None + + async def _send_batch_message( + self, + token: str, + chat_id: str, + msg_key: str, + msg_param: dict[str, Any], + ) -> bool: + if not self._http: + logger.warning("DingTalk 
HTTP client not initialized, cannot send") + return False + + url = "https://api.dingtalk.com/v1.0/robot/oToMessages/batchSend" + headers = {"x-acs-dingtalk-access-token": token} + payload = { + "robotCode": self.config.client_id, + "userIds": [chat_id], + "msgKey": msg_key, + "msgParam": json.dumps(msg_param, ensure_ascii=False), + } + + try: + resp = await self._http.post(url, json=payload, headers=headers) + body = resp.text + if resp.status_code != 200: + logger.error( + "DingTalk send failed msgKey={} status={} body={}", + msg_key, + resp.status_code, + body[:500], + ) + return False + try: + result = resp.json() + except Exception: + result = {} + errcode = result.get("errcode") + if errcode not in (None, 0): + logger.error( + "DingTalk send api error msgKey={} errcode={} body={}", + msg_key, + errcode, + body[:500], + ) + return False + logger.debug("DingTalk message sent to {} with msgKey={}", chat_id, msg_key) + return True + except Exception as e: + logger.error("Error sending DingTalk message msgKey={} err={}", msg_key, e) + return False + + async def _send_markdown_text(self, token: str, chat_id: str, content: str) -> bool: + return await self._send_batch_message( + token, + chat_id, + "sampleMarkdown", + {"text": content, "title": "Nanobot Reply"}, + ) + + async def _send_media_ref(self, token: str, chat_id: str, media_ref: str) -> bool: + media_ref = (media_ref or "").strip() + if not media_ref: + return True + + upload_type = self._guess_upload_type(media_ref) + if upload_type == "image" and self._is_http_url(media_ref): + ok = await self._send_batch_message( + token, + chat_id, + "sampleImageMsg", + {"photoURL": media_ref}, + ) + if ok: + return True + logger.warning("DingTalk image url send failed, trying upload fallback: {}", media_ref) + + data, filename, content_type = await self._read_media_bytes(media_ref) + if not data: + logger.error("DingTalk media read failed: {}", media_ref) + return False + + filename = filename or 
self._guess_filename(media_ref, upload_type) + file_type = Path(filename).suffix.lower().lstrip(".") + if not file_type: + guessed = mimetypes.guess_extension(content_type or "") + file_type = (guessed or ".bin").lstrip(".") + if file_type == "jpeg": + file_type = "jpg" + + media_id = await self._upload_media( + token=token, + data=data, + media_type=upload_type, + filename=filename, + content_type=content_type, + ) + if not media_id: + return False + + if upload_type == "image": + # Verified in production: sampleImageMsg accepts media_id in photoURL. + ok = await self._send_batch_message( + token, + chat_id, + "sampleImageMsg", + {"photoURL": media_id}, + ) + if ok: + return True + logger.warning("DingTalk image media_id send failed, falling back to file: {}", media_ref) + + return await self._send_batch_message( + token, + chat_id, + "sampleFile", + {"mediaId": media_id, "fileName": filename, "fileType": file_type}, + ) + async def send(self, msg: OutboundMessage) -> None: """Send a message through DingTalk.""" token = await self._get_access_token() if not token: return - # oToMessages/batchSend: sends to individual users (private chat) - # https://open.dingtalk.com/document/orgapp/robot-batch-send-messages - url = "https://api.dingtalk.com/v1.0/robot/oToMessages/batchSend" + if msg.content and msg.content.strip(): + await self._send_markdown_text(token, msg.chat_id, msg.content.strip()) - headers = {"x-acs-dingtalk-access-token": token} - - data = { - "robotCode": self.config.client_id, - "userIds": [msg.chat_id], # chat_id is the user's staffId - "msgKey": "sampleMarkdown", - "msgParam": json.dumps({ - "text": msg.content, - "title": "Nanobot Reply", - }, ensure_ascii=False), - } - - if not self._http: - logger.warning("DingTalk HTTP client not initialized, cannot send") - return - - try: - resp = await self._http.post(url, json=data, headers=headers) - if resp.status_code != 200: - logger.error("DingTalk send failed: {}", resp.text) - else: - 
logger.debug("DingTalk message sent to {}", msg.chat_id) - except Exception as e: - logger.error("Error sending DingTalk message: {}", e) + for media_ref in msg.media or []: + ok = await self._send_media_ref(token, msg.chat_id, media_ref) + if ok: + continue + logger.error("DingTalk media send failed for {}", media_ref) + # Send visible fallback so failures are observable by the user. + filename = self._guess_filename(media_ref, self._guess_upload_type(media_ref)) + await self._send_markdown_text( + token, + msg.chat_id, + f"[Attachment send failed: {filename}]", + ) async def _on_message(self, content: str, sender_id: str, sender_name: str) -> None: """Handle incoming message (called by NanobotDingTalkHandler). From b3af59fc8e09fd6acc0af8f0bddcadec64ce7d42 Mon Sep 17 00:00:00 2001 From: "zhangxiaoyu.york" Date: Sun, 1 Mar 2026 00:20:32 +0800 Subject: [PATCH 03/14] bugfix: remove client.stop --- nanobot/channels/feishu.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/nanobot/channels/feishu.py b/nanobot/channels/feishu.py index 4a6312e..4abac85 100644 --- a/nanobot/channels/feishu.py +++ b/nanobot/channels/feishu.py @@ -311,8 +311,8 @@ class FeishuChannel(BaseChannel): self._ws_client.start() except Exception as e: logger.warning("Feishu WebSocket error: {}", e) - if self._running: - import time; time.sleep(5) + if self._running: + import time; time.sleep(5) self._ws_thread = threading.Thread(target=run_ws, daemon=True) self._ws_thread.start() @@ -327,11 +327,6 @@ class FeishuChannel(BaseChannel): async def stop(self) -> None: """Stop the Feishu bot.""" self._running = False - if self._ws_client: - try: - self._ws_client.stop() - except Exception as e: - logger.warning("Error stopping WebSocket client: {}", e) logger.info("Feishu bot stopped") def _add_reaction_sync(self, message_id: str, emoji_type: str) -> None: From 73a708770e3a2e7331ae61100778cd4d78ced5c4 Mon Sep 17 00:00:00 2001 From: Re-bin Date: Sat, 28 Feb 2026 16:23:43 +0000
Subject: [PATCH 04/14] refactor: compress DingTalk helpers --- nanobot/channels/dingtalk.py | 77 ++++++++---------------------------- 1 file changed, 16 insertions(+), 61 deletions(-) diff --git a/nanobot/channels/dingtalk.py b/nanobot/channels/dingtalk.py index 53a9bb8..2797029 100644 --- a/nanobot/channels/dingtalk.py +++ b/nanobot/channels/dingtalk.py @@ -200,34 +200,18 @@ class DingTalkChannel(BaseChannel): @staticmethod def _is_http_url(value: str) -> bool: - low = value.lower() - return low.startswith("http://") or low.startswith("https://") + return urlparse(value).scheme in ("http", "https") def _guess_upload_type(self, media_ref: str) -> str: - parsed = urlparse(media_ref) - path = parsed.path if parsed.scheme else media_ref - ext = Path(path).suffix.lower() - if ext in self._IMAGE_EXTS: - return "image" - if ext in self._AUDIO_EXTS: - return "voice" - if ext in self._VIDEO_EXTS: - return "video" + ext = Path(urlparse(media_ref).path).suffix.lower() + if ext in self._IMAGE_EXTS: return "image" + if ext in self._AUDIO_EXTS: return "voice" + if ext in self._VIDEO_EXTS: return "video" return "file" def _guess_filename(self, media_ref: str, upload_type: str) -> str: - parsed = urlparse(media_ref) - path = parsed.path if parsed.scheme else media_ref - name = os.path.basename(path) - if name: - return name - fallback = { - "image": "image.jpg", - "voice": "audio.amr", - "video": "video.mp4", - "file": "file.bin", - } - return fallback.get(upload_type, "file.bin") + name = os.path.basename(urlparse(media_ref).path) + return name or {"image": "image.jpg", "voice": "audio.amr", "video": "video.mp4"}.get(upload_type, "file.bin") async def _read_media_bytes( self, @@ -288,33 +272,16 @@ class DingTalkChannel(BaseChannel): try: resp = await self._http.post(url, files=files) text = resp.text - try: - result = resp.json() - except Exception: - result = {} + result = resp.json() if resp.headers.get("content-type", "").startswith("application/json") else {} if 
resp.status_code >= 400: - logger.error( - "DingTalk media upload failed status={} type={} body={}", - resp.status_code, - media_type, - text[:500], - ) + logger.error("DingTalk media upload failed status={} type={} body={}", resp.status_code, media_type, text[:500]) return None errcode = result.get("errcode", 0) if errcode != 0: - logger.error( - "DingTalk media upload api error type={} errcode={} body={}", - media_type, - errcode, - text[:500], - ) + logger.error("DingTalk media upload api error type={} errcode={} body={}", media_type, errcode, text[:500]) return None - media_id = ( - result.get("media_id") - or result.get("mediaId") - or (result.get("result") or {}).get("media_id") - or (result.get("result") or {}).get("mediaId") - ) + sub = result.get("result") or {} + media_id = result.get("media_id") or result.get("mediaId") or sub.get("media_id") or sub.get("mediaId") if not media_id: logger.error("DingTalk media upload missing media_id body={}", text[:500]) return None @@ -347,25 +314,13 @@ class DingTalkChannel(BaseChannel): resp = await self._http.post(url, json=payload, headers=headers) body = resp.text if resp.status_code != 200: - logger.error( - "DingTalk send failed msgKey={} status={} body={}", - msg_key, - resp.status_code, - body[:500], - ) + logger.error("DingTalk send failed msgKey={} status={} body={}", msg_key, resp.status_code, body[:500]) return False - try: - result = resp.json() - except Exception: - result = {} + try: result = resp.json() + except Exception: result = {} errcode = result.get("errcode") if errcode not in (None, 0): - logger.error( - "DingTalk send api error msgKey={} errcode={} body={}", - msg_key, - errcode, - body[:500], - ) + logger.error("DingTalk send api error msgKey={} errcode={} body={}", msg_key, errcode, body[:500]) return False logger.debug("DingTalk message sent to {} with msgKey={}", chat_id, msg_key) return True From 5d829ca575464214b18b8af27d326a2db967e922 Mon Sep 17 00:00:00 2001 From: "zhangxiaoyu.york" 
Date: Sun, 1 Mar 2026 00:30:03 +0800 Subject: [PATCH 05/14] bugfix: remove client.stop --- nanobot/channels/feishu.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/nanobot/channels/feishu.py b/nanobot/channels/feishu.py index 4abac85..161d31e 100644 --- a/nanobot/channels/feishu.py +++ b/nanobot/channels/feishu.py @@ -325,7 +325,13 @@ class FeishuChannel(BaseChannel): await asyncio.sleep(1) async def stop(self) -> None: - """Stop the Feishu bot.""" + """ + Stop the Feishu bot. + + Notice: lark.ws.Client does not expose stop method, simply exiting the program will close the client. + + Reference: https://github.com/larksuite/oapi-sdk-python/blob/v2_main/lark_oapi/ws/client.py#L86 + """ self._running = False logger.info("Feishu bot stopped") From 8545d5790ebf0979eec3a96b850f12b7967688c3 Mon Sep 17 00:00:00 2001 From: Re-bin Date: Sat, 28 Feb 2026 16:32:50 +0000 Subject: [PATCH 06/14] refactor: streamline subagent prompt by reusing ContextBuilder and SkillsLoader --- README.md | 2 +- nanobot/agent/subagent.py | 44 +++++++++++++-------------------------- 2 files changed, 15 insertions(+), 31 deletions(-) diff --git a/README.md b/README.md index d788e5e..66da385 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ ⚑️ Delivers core agent functionality in just **~4,000** lines of code β€” **99% smaller** than Clawdbot's 430k+ lines. 
-πŸ“ Real-time line count: **3,922 lines** (run `bash core_agent_lines.sh` to verify anytime) +πŸ“ Real-time line count: **3,927 lines** (run `bash core_agent_lines.sh` to verify anytime) ## πŸ“’ News diff --git a/nanobot/agent/subagent.py b/nanobot/agent/subagent.py index 337796c..5606303 100644 --- a/nanobot/agent/subagent.py +++ b/nanobot/agent/subagent.py @@ -104,8 +104,7 @@ class SubagentManager: tools.register(WebSearchTool(api_key=self.brave_api_key)) tools.register(WebFetchTool()) - # Build messages with subagent-specific prompt - system_prompt = self._build_subagent_prompt(task) + system_prompt = self._build_subagent_prompt() messages: list[dict[str, Any]] = [ {"role": "system", "content": system_prompt}, {"role": "user", "content": task}, @@ -204,42 +203,27 @@ Summarize this naturally for the user. Keep it brief (1-2 sentences). Do not men await self.bus.publish_inbound(msg) logger.debug("Subagent [{}] announced result to {}:{}", task_id, origin['channel'], origin['chat_id']) - def _build_subagent_prompt(self, task: str) -> str: + def _build_subagent_prompt(self) -> str: """Build a focused system prompt for the subagent.""" - from datetime import datetime - import time as _time - now = datetime.now().strftime("%Y-%m-%d %H:%M (%A)") - tz = _time.strftime("%Z") or "UTC" + from nanobot.agent.context import ContextBuilder + from nanobot.agent.skills import SkillsLoader - return f"""# Subagent + time_ctx = ContextBuilder._build_runtime_context(None, None) + parts = [f"""# Subagent -## Current Time -{now} ({tz}) +{time_ctx} You are a subagent spawned by the main agent to complete a specific task. - -## Rules -1. Stay focused - complete only the assigned task, nothing else -2. Your final response will be reported back to the main agent -3. Do not initiate conversations or take on side tasks -4. 
Be concise but informative in your findings - -## What You Can Do -- Read and write files in the workspace -- Execute shell commands -- Search the web and fetch web pages -- Complete the task thoroughly - -## What You Cannot Do -- Send messages directly to users (no message tool available) -- Spawn other subagents -- Access the main agent's conversation history +Stay focused on the assigned task. Your final response will be reported back to the main agent. ## Workspace -Your workspace is at: {self.workspace} -Skills are available at: {self.workspace}/skills/ (read SKILL.md files as needed) +{self.workspace}"""] -When you have completed the task, provide a clear summary of your findings or actions.""" + skills_summary = SkillsLoader(self.workspace).build_skills_summary() + if skills_summary: + parts.append(f"## Skills\n\nRead SKILL.md with read_file to use a skill.\n\n{skills_summary}") + + return "\n\n".join(parts) async def cancel_by_session(self, session_key: str) -> int: """Cancel all subagents for the given session. Returns count cancelled.""" From cfe33ff7cd321813b03d1bc88a18bffc811dbeb9 Mon Sep 17 00:00:00 2001 From: zerone0x Date: Sat, 28 Feb 2026 17:35:07 +0100 Subject: [PATCH 07/14] fix(qq): disable botpy file log to fix read-only filesystem error When nanobot is run as a systemd service with ProtectSystem=strict, the process cwd defaults to the read-only root filesystem (/). botpy's default Client configuration includes a TimedRotatingFileHandler that writes 'botpy.log' to os.getcwd(), which raises [Errno 30] Read-only file system. Pass ext_handlers=False when constructing the botpy Client subclass to suppress the file handler. nanobot already routes all log output through loguru, so botpy's file handler is redundant. 
Fixes #1343 Co-Authored-By: Claude --- nanobot/channels/qq.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/nanobot/channels/qq.py b/nanobot/channels/qq.py index 50dbbde..41e6ad3 100644 --- a/nanobot/channels/qq.py +++ b/nanobot/channels/qq.py @@ -31,7 +31,13 @@ def _make_bot_class(channel: "QQChannel") -> "type[botpy.Client]": class _Bot(botpy.Client): def __init__(self): - super().__init__(intents=intents) + # Disable botpy's default file handler (TimedRotatingFileHandler). + # By default botpy writes "botpy.log" to the process cwd, which + # fails under systemd with ProtectSystem=strict (read-only root fs). + # nanobot already handles logging via loguru, so the file handler is + # redundant. ext_handlers=False keeps console output but suppresses + # the file log. See: https://github.com/HKUDS/nanobot/issues/1343 + super().__init__(intents=intents, ext_handlers=False) async def on_ready(self): logger.info("QQ bot ready: {}", self.robot.name) From c34e1053f05ec8f96f68904dcd26fbf86e654afd Mon Sep 17 00:00:00 2001 From: Re-bin Date: Sat, 28 Feb 2026 16:45:06 +0000 Subject: [PATCH 08/14] fix(qq): disable botpy file log to fix read-only filesystem error --- nanobot/channels/qq.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/nanobot/channels/qq.py b/nanobot/channels/qq.py index 41e6ad3..7b171bc 100644 --- a/nanobot/channels/qq.py +++ b/nanobot/channels/qq.py @@ -31,12 +31,7 @@ def _make_bot_class(channel: "QQChannel") -> "type[botpy.Client]": class _Bot(botpy.Client): def __init__(self): - # Disable botpy's default file handler (TimedRotatingFileHandler). - # By default botpy writes "botpy.log" to the process cwd, which - # fails under systemd with ProtectSystem=strict (read-only root fs). - # nanobot already handles logging via loguru, so the file handler is - # redundant. ext_handlers=False keeps console output but suppresses - # the file log. 
See: https://github.com/HKUDS/nanobot/issues/1343 + # Disable botpy's file log β€” nanobot uses loguru; default "botpy.log" fails on read-only fs super().__init__(intents=intents, ext_handlers=False) async def on_ready(self): From 9e2f69bd5a069c8e7b7a2288fa7e004a4409cec5 Mon Sep 17 00:00:00 2001 From: "zhangxiaoyu.york" Date: Sun, 1 Mar 2026 00:51:17 +0800 Subject: [PATCH 09/14] tidy up --- nanobot/channels/feishu.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nanobot/channels/feishu.py b/nanobot/channels/feishu.py index 161d31e..16c6a07 100644 --- a/nanobot/channels/feishu.py +++ b/nanobot/channels/feishu.py @@ -311,8 +311,8 @@ class FeishuChannel(BaseChannel): self._ws_client.start() except Exception as e: logger.warning("Feishu WebSocket error: {}", e) - if self._running: - import time; time.sleep(5) + if self._running: + import time; time.sleep(5) self._ws_thread = threading.Thread(target=run_ws, daemon=True) self._ws_thread.start() From f9d72e2e74cb4177ed892b66fdf4dd639690793c Mon Sep 17 00:00:00 2001 From: Re-bin Date: Sat, 28 Feb 2026 17:18:05 +0000 Subject: [PATCH 10/14] feat: add reasoning_effort config to enable LLM thinking mode --- README.md | 2 +- nanobot/agent/loop.py | 4 ++++ nanobot/agent/subagent.py | 3 +++ nanobot/cli/commands.py | 3 +++ nanobot/config/schema.py | 1 + nanobot/providers/base.py | 1 + nanobot/providers/custom_provider.py | 5 ++++- nanobot/providers/litellm_provider.py | 5 +++++ nanobot/providers/openai_codex_provider.py | 1 + 9 files changed, 23 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 66da385..0d46b7f 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ ⚑️ Delivers core agent functionality in just **~4,000** lines of code β€” **99% smaller** than Clawdbot's 430k+ lines. 
-πŸ“ Real-time line count: **3,927 lines** (run `bash core_agent_lines.sh` to verify anytime) +πŸ“ Real-time line count: **3,935 lines** (run `bash core_agent_lines.sh` to verify anytime) ## πŸ“’ News diff --git a/nanobot/agent/loop.py b/nanobot/agent/loop.py index d8e5cad..b42c3ba 100644 --- a/nanobot/agent/loop.py +++ b/nanobot/agent/loop.py @@ -56,6 +56,7 @@ class AgentLoop: temperature: float = 0.1, max_tokens: int = 4096, memory_window: int = 100, + reasoning_effort: str | None = None, brave_api_key: str | None = None, exec_config: ExecToolConfig | None = None, cron_service: CronService | None = None, @@ -74,6 +75,7 @@ class AgentLoop: self.temperature = temperature self.max_tokens = max_tokens self.memory_window = memory_window + self.reasoning_effort = reasoning_effort self.brave_api_key = brave_api_key self.exec_config = exec_config or ExecToolConfig() self.cron_service = cron_service @@ -89,6 +91,7 @@ class AgentLoop: model=self.model, temperature=self.temperature, max_tokens=self.max_tokens, + reasoning_effort=reasoning_effort, brave_api_key=brave_api_key, exec_config=self.exec_config, restrict_to_workspace=restrict_to_workspace, @@ -191,6 +194,7 @@ class AgentLoop: model=self.model, temperature=self.temperature, max_tokens=self.max_tokens, + reasoning_effort=self.reasoning_effort, ) if response.has_tool_calls: diff --git a/nanobot/agent/subagent.py b/nanobot/agent/subagent.py index 5606303..a99ba4d 100644 --- a/nanobot/agent/subagent.py +++ b/nanobot/agent/subagent.py @@ -28,6 +28,7 @@ class SubagentManager: model: str | None = None, temperature: float = 0.7, max_tokens: int = 4096, + reasoning_effort: str | None = None, brave_api_key: str | None = None, exec_config: "ExecToolConfig | None" = None, restrict_to_workspace: bool = False, @@ -39,6 +40,7 @@ class SubagentManager: self.model = model or provider.get_default_model() self.temperature = temperature self.max_tokens = max_tokens + self.reasoning_effort = reasoning_effort self.brave_api_key = 
brave_api_key self.exec_config = exec_config or ExecToolConfig() self.restrict_to_workspace = restrict_to_workspace @@ -124,6 +126,7 @@ class SubagentManager: model=self.model, temperature=self.temperature, max_tokens=self.max_tokens, + reasoning_effort=self.reasoning_effort, ) if response.has_tool_calls: diff --git a/nanobot/cli/commands.py b/nanobot/cli/commands.py index fc4c261..2e417d6 100644 --- a/nanobot/cli/commands.py +++ b/nanobot/cli/commands.py @@ -283,6 +283,7 @@ def gateway( max_tokens=config.agents.defaults.max_tokens, max_iterations=config.agents.defaults.max_tool_iterations, memory_window=config.agents.defaults.memory_window, + reasoning_effort=config.agents.defaults.reasoning_effort, brave_api_key=config.tools.web.search.api_key or None, exec_config=config.tools.exec, cron_service=cron, @@ -441,6 +442,7 @@ def agent( max_tokens=config.agents.defaults.max_tokens, max_iterations=config.agents.defaults.max_tool_iterations, memory_window=config.agents.defaults.memory_window, + reasoning_effort=config.agents.defaults.reasoning_effort, brave_api_key=config.tools.web.search.api_key or None, exec_config=config.tools.exec, cron_service=cron, @@ -932,6 +934,7 @@ def cron_run( max_tokens=config.agents.defaults.max_tokens, max_iterations=config.agents.defaults.max_tool_iterations, memory_window=config.agents.defaults.memory_window, + reasoning_effort=config.agents.defaults.reasoning_effort, brave_api_key=config.tools.web.search.api_key or None, exec_config=config.tools.exec, restrict_to_workspace=config.tools.restrict_to_workspace, diff --git a/nanobot/config/schema.py b/nanobot/config/schema.py index 1ff9782..4f06ebe 100644 --- a/nanobot/config/schema.py +++ b/nanobot/config/schema.py @@ -226,6 +226,7 @@ class AgentDefaults(Base): temperature: float = 0.1 max_tool_iterations: int = 40 memory_window: int = 100 + reasoning_effort: str | None = None # low / medium / high β€” enables LLM thinking mode class AgentsConfig(Base): diff --git 
a/nanobot/providers/base.py b/nanobot/providers/base.py index eb1599a..36e9938 100644 --- a/nanobot/providers/base.py +++ b/nanobot/providers/base.py @@ -88,6 +88,7 @@ class LLMProvider(ABC): model: str | None = None, max_tokens: int = 4096, temperature: float = 0.7, + reasoning_effort: str | None = None, ) -> LLMResponse: """ Send a chat completion request. diff --git a/nanobot/providers/custom_provider.py b/nanobot/providers/custom_provider.py index a578d14..56e6270 100644 --- a/nanobot/providers/custom_provider.py +++ b/nanobot/providers/custom_provider.py @@ -18,13 +18,16 @@ class CustomProvider(LLMProvider): self._client = AsyncOpenAI(api_key=api_key, base_url=api_base) async def chat(self, messages: list[dict[str, Any]], tools: list[dict[str, Any]] | None = None, - model: str | None = None, max_tokens: int = 4096, temperature: float = 0.7) -> LLMResponse: + model: str | None = None, max_tokens: int = 4096, temperature: float = 0.7, + reasoning_effort: str | None = None) -> LLMResponse: kwargs: dict[str, Any] = { "model": model or self.default_model, "messages": self._sanitize_empty_content(messages), "max_tokens": max(1, max_tokens), "temperature": temperature, } + if reasoning_effort: + kwargs["reasoning_effort"] = reasoning_effort if tools: kwargs.update(tools=tools, tool_choice="auto") try: diff --git a/nanobot/providers/litellm_provider.py b/nanobot/providers/litellm_provider.py index 5427d97..0067ae8 100644 --- a/nanobot/providers/litellm_provider.py +++ b/nanobot/providers/litellm_provider.py @@ -178,6 +178,7 @@ class LiteLLMProvider(LLMProvider): model: str | None = None, max_tokens: int = 4096, temperature: float = 0.7, + reasoning_effort: str | None = None, ) -> LLMResponse: """ Send a chat completion request via LiteLLM. 
@@ -224,6 +225,10 @@ class LiteLLMProvider(LLMProvider): if self.extra_headers: kwargs["extra_headers"] = self.extra_headers + if reasoning_effort: + kwargs["reasoning_effort"] = reasoning_effort + kwargs["drop_params"] = True + if tools: kwargs["tools"] = tools kwargs["tool_choice"] = "auto" diff --git a/nanobot/providers/openai_codex_provider.py b/nanobot/providers/openai_codex_provider.py index fa28593..9039202 100644 --- a/nanobot/providers/openai_codex_provider.py +++ b/nanobot/providers/openai_codex_provider.py @@ -31,6 +31,7 @@ class OpenAICodexProvider(LLMProvider): model: str | None = None, max_tokens: int = 4096, temperature: float = 0.7, + reasoning_effort: str | None = None, ) -> LLMResponse: model = model or self.default_model system_prompt, input_items = _convert_messages(messages) From 5ca386ebf52f36441b44dacd85072d79aea0dd98 Mon Sep 17 00:00:00 2001 From: Re-bin Date: Sat, 28 Feb 2026 17:37:12 +0000 Subject: [PATCH 11/14] fix: preserve reasoning_content and thinking_blocks in session history --- nanobot/agent/context.py | 3 +++ nanobot/agent/loop.py | 4 +++- nanobot/providers/base.py | 1 + nanobot/providers/litellm_provider.py | 4 +++- 4 files changed, 10 insertions(+), 2 deletions(-) diff --git a/nanobot/agent/context.py b/nanobot/agent/context.py index be0ec59..a469bc8 100644 --- a/nanobot/agent/context.py +++ b/nanobot/agent/context.py @@ -150,6 +150,7 @@ Reply directly with text for conversations. Only use the 'message' tool to send content: str | None, tool_calls: list[dict[str, Any]] | None = None, reasoning_content: str | None = None, + thinking_blocks: list[dict] | None = None, ) -> list[dict[str, Any]]: """Add an assistant message to the message list.""" msg: dict[str, Any] = {"role": "assistant", "content": content} @@ -157,5 +158,7 @@ Reply directly with text for conversations. 
Only use the 'message' tool to send msg["tool_calls"] = tool_calls if reasoning_content is not None: msg["reasoning_content"] = reasoning_content + if thinking_blocks: + msg["thinking_blocks"] = thinking_blocks messages.append(msg) return messages diff --git a/nanobot/agent/loop.py b/nanobot/agent/loop.py index b42c3ba..8da9fcb 100644 --- a/nanobot/agent/loop.py +++ b/nanobot/agent/loop.py @@ -218,6 +218,7 @@ class AgentLoop: messages = self.context.add_assistant_message( messages, response.content, tool_call_dicts, reasoning_content=response.reasoning_content, + thinking_blocks=response.thinking_blocks, ) for tool_call in response.tool_calls: @@ -238,6 +239,7 @@ class AgentLoop: break messages = self.context.add_assistant_message( messages, clean, reasoning_content=response.reasoning_content, + thinking_blocks=response.thinking_blocks, ) final_content = clean break @@ -451,7 +453,7 @@ class AgentLoop: """Save new-turn messages into session, truncating large tool results.""" from datetime import datetime for m in messages[skip:]: - entry = {k: v for k, v in m.items() if k != "reasoning_content"} + entry = dict(m) role, content = entry.get("role"), entry.get("content") if role == "assistant" and not content and not entry.get("tool_calls"): continue # skip empty assistant messages β€” they poison session context diff --git a/nanobot/providers/base.py b/nanobot/providers/base.py index 36e9938..25932a3 100644 --- a/nanobot/providers/base.py +++ b/nanobot/providers/base.py @@ -21,6 +21,7 @@ class LLMResponse: finish_reason: str = "stop" usage: dict[str, int] = field(default_factory=dict) reasoning_content: str | None = None # Kimi, DeepSeek-R1 etc. 
+ thinking_blocks: list[dict] | None = None # Anthropic extended thinking @property def has_tool_calls(self) -> bool: diff --git a/nanobot/providers/litellm_provider.py b/nanobot/providers/litellm_provider.py index 0067ae8..aff2ac7 100644 --- a/nanobot/providers/litellm_provider.py +++ b/nanobot/providers/litellm_provider.py @@ -16,7 +16,7 @@ from nanobot.providers.registry import find_by_model, find_gateway # Standard OpenAI chat-completion message keys plus reasoning_content for # thinking-enabled models (Kimi k2.5, DeepSeek-R1, etc.). -_ALLOWED_MSG_KEYS = frozenset({"role", "content", "tool_calls", "tool_call_id", "name", "reasoning_content"}) +_ALLOWED_MSG_KEYS = frozenset({"role", "content", "tool_calls", "tool_call_id", "name", "reasoning_content", "thinking_blocks"}) _ALNUM = string.ascii_letters + string.digits def _short_tool_id() -> str: @@ -271,6 +271,7 @@ class LiteLLMProvider(LLMProvider): } reasoning_content = getattr(message, "reasoning_content", None) or None + thinking_blocks = getattr(message, "thinking_blocks", None) or None return LLMResponse( content=message.content, @@ -278,6 +279,7 @@ class LiteLLMProvider(LLMProvider): finish_reason=choice.finish_reason or "stop", usage=usage, reasoning_content=reasoning_content, + thinking_blocks=thinking_blocks, ) def get_default_model(self) -> str: From 4f0530dd6147b057ef44af278ee577cf21ecebd5 Mon Sep 17 00:00:00 2001 From: Re-bin Date: Sat, 28 Feb 2026 17:55:18 +0000 Subject: [PATCH 12/14] release: v0.1.4.post3 --- nanobot/__init__.py | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nanobot/__init__.py b/nanobot/__init__.py index bb9bfb6..4dba5f4 100644 --- a/nanobot/__init__.py +++ b/nanobot/__init__.py @@ -2,5 +2,5 @@ nanobot - A lightweight AI agent framework """ -__version__ = "0.1.4.post2" +__version__ = "0.1.4.post3" __logo__ = "🐈" diff --git a/pyproject.toml b/pyproject.toml index 20dcb1e..a22053c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 
+1,6 @@ [project] name = "nanobot-ai" -version = "0.1.4.post2" +version = "0.1.4.post3" description = "A lightweight personal AI assistant framework" requires-python = ">=3.11" license = {text = "MIT"} From ee9bd6a96c736295b54878f65a9489260a222c7d Mon Sep 17 00:00:00 2001 From: Re-bin Date: Sat, 28 Feb 2026 18:04:12 +0000 Subject: [PATCH 13/14] docs: update v0.1.4.post3 release news --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 0d46b7f..4ae9aa2 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,10 @@ ## πŸ“’ News +- **2026-02-28** πŸš€ Released **v0.1.4.post3** β€” cleaner context, hardened session history, and smarter agent. See [release notes](https://github.com/HKUDS/nanobot/releases/tag/v0.1.4.post3) for details. +- **2026-02-27** 🧠 Experimental thinking mode support, DingTalk media messages, Feishu and QQ channel fixes. +- **2026-02-26** πŸ›‘οΈ Session poisoning fix, WhatsApp dedup, Windows path guard, Mistral compatibility. +- **2026-02-25** 🧹 New Matrix channel, cleaner session context, auto workspace template sync. - **2026-02-24** πŸš€ Released **v0.1.4.post2** β€” a reliability-focused release with a redesigned heartbeat, prompt cache optimization, and hardened provider & channel stability. See [release notes](https://github.com/HKUDS/nanobot/releases/tag/v0.1.4.post2) for details. - **2026-02-23** πŸ”§ Virtual tool-call heartbeat, prompt cache optimization, Slack mrkdwn fixes. - **2026-02-22** πŸ›‘οΈ Slack thread isolation, Discord typing fix, agent reliability improvements. 
From f172c9f381980a870ac47283a58136f09314b184 Mon Sep 17 00:00:00 2001 From: Re-bin Date: Sat, 28 Feb 2026 18:06:56 +0000 Subject: [PATCH 14/14] docs: reformat release news with v0.1.4.post3 release --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 4ae9aa2..45779e7 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ ## πŸ“’ News -- **2026-02-28** πŸš€ Released **v0.1.4.post3** β€” cleaner context, hardened session history, and smarter agent. See [release notes](https://github.com/HKUDS/nanobot/releases/tag/v0.1.4.post3) for details. +- **2026-02-28** πŸš€ Released **v0.1.4.post3** β€” cleaner context, hardened session history, and smarter agent. Please see [release notes](https://github.com/HKUDS/nanobot/releases/tag/v0.1.4.post3) for details. - **2026-02-27** 🧠 Experimental thinking mode support, DingTalk media messages, Feishu and QQ channel fixes. - **2026-02-26** πŸ›‘οΈ Session poisoning fix, WhatsApp dedup, Windows path guard, Mistral compatibility. - **2026-02-25** 🧹 New Matrix channel, cleaner session context, auto workspace template sync. @@ -30,6 +30,10 @@ - **2026-02-21** πŸŽ‰ Released **v0.1.4.post1** β€” new providers, media support across channels, and major stability improvements. See [release notes](https://github.com/HKUDS/nanobot/releases/tag/v0.1.4.post1) for details. - **2026-02-20** 🐦 Feishu now receives multimodal files from users. More reliable memory under the hood. - **2026-02-19** ✨ Slack now sends files, Discord splits long messages, and subagents work in CLI mode. + +
+Earlier news + - **2026-02-18** ⚑️ nanobot now supports VolcEngine, MCP custom auth headers, and Anthropic prompt caching. - **2026-02-17** πŸŽ‰ Released **v0.1.4** β€” MCP support, progress streaming, new providers, and multiple channel improvements. Please see [release notes](https://github.com/HKUDS/nanobot/releases/tag/v0.1.4) for details. - **2026-02-16** 🦞 nanobot now integrates a [ClawHub](https://clawhub.ai) skill β€” search and install public agent skills. @@ -38,10 +42,6 @@ - **2026-02-13** πŸŽ‰ Released **v0.1.3.post7** β€” includes security hardening and multiple improvements. **Please upgrade to the latest version to address security issues**. See [release notes](https://github.com/HKUDS/nanobot/releases/tag/v0.1.3.post7) for more details. - **2026-02-12** 🧠 Redesigned memory system β€” Less code, more reliable. Join the [discussion](https://github.com/HKUDS/nanobot/discussions/566) about it! - **2026-02-11** ✨ Enhanced CLI experience and added MiniMax support! - -
-Earlier news - - **2026-02-10** πŸŽ‰ Released **v0.1.3.post6** with improvements! Check the updates [notes](https://github.com/HKUDS/nanobot/releases/tag/v0.1.3.post6) and our [roadmap](https://github.com/HKUDS/nanobot/discussions/431). - **2026-02-09** πŸ’¬ Added Slack, Email, and QQ support β€” nanobot now supports multiple chat platforms! - **2026-02-08** πŸ”§ Refactored Providersβ€”adding a new LLM provider now takes just 2 simple steps! Check [here](#providers).