From ae788a17f8371de0d60b7b9d713bdb8261fa6cd2 Mon Sep 17 00:00:00 2001
From: chengyongru <2755839590@qq.com>
Date: Mon, 2 Mar 2026 11:03:54 +0800
Subject: [PATCH 01/26] chore: add .worktrees to .gitignore
Co-Authored-By: Claude Opus 4.6
---
.gitignore | 1 +
1 file changed, 1 insertion(+)
diff --git a/.gitignore b/.gitignore
index d7b930d..a543251 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+.worktrees/
.assets
.env
*.pyc
From aed1ef55298433a963474d8fbdcf0b203945ffb5 Mon Sep 17 00:00:00 2001
From: chengyongru <2755839590@qq.com>
Date: Mon, 2 Mar 2026 11:04:53 +0800
Subject: [PATCH 02/26] fix: add SIGTERM, SIGHUP handling and ignore SIGPIPE
- Add handler for SIGTERM to prevent "Terminated" message on Linux
- Add handler for SIGHUP for terminal closure handling
- Ignore SIGPIPE to prevent silent process termination
- Change os._exit(0) to sys.exit(0) for proper cleanup
Fixes issue #1365
Co-Authored-By: Claude Opus 4.6
---
nanobot/cli/commands.py | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/nanobot/cli/commands.py b/nanobot/cli/commands.py
index 2662e9f..8c53992 100644
--- a/nanobot/cli/commands.py
+++ b/nanobot/cli/commands.py
@@ -501,12 +501,17 @@ def agent(
else:
cli_channel, cli_chat_id = "cli", session_id
- def _exit_on_sigint(signum, frame):
+ def _handle_signal(signum, frame):
+ sig_name = signal.Signals(signum).name
_restore_terminal()
- console.print("\nGoodbye!")
- os._exit(0)
+ console.print(f"\nReceived {sig_name}, goodbye!")
+ sys.exit(0)
- signal.signal(signal.SIGINT, _exit_on_sigint)
+ signal.signal(signal.SIGINT, _handle_signal)
+ signal.signal(signal.SIGTERM, _handle_signal)
+ signal.signal(signal.SIGHUP, _handle_signal)
+ # Ignore SIGPIPE to prevent silent process termination when writing to closed pipes
+ signal.signal(signal.SIGPIPE, signal.SIG_IGN)
async def run_interactive():
bus_task = asyncio.create_task(agent_loop.run())
From e9d023f52cbc7fb8eab37ab5aa4a501b0b5bdc81 Mon Sep 17 00:00:00 2001
From: Joel Chan
Date: Thu, 12 Feb 2026 17:10:50 +0800
Subject: [PATCH 03/26] feat(discord): add group policy to control group
response behaviour
---
README.md | 8 +++++++-
nanobot/channels/discord.py | 36 +++++++++++++++++++++++++++++++++++-
nanobot/config/schema.py | 1 +
3 files changed, 43 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index 45779e7..f141a1c 100644
--- a/README.md
+++ b/README.md
@@ -293,12 +293,18 @@ If you prefer to configure manually, add the following to `~/.nanobot/config.jso
"discord": {
"enabled": true,
"token": "YOUR_BOT_TOKEN",
- "allowFrom": ["YOUR_USER_ID"]
+ "allowFrom": ["YOUR_USER_ID"],
+ "groupPolicy": "mention"
}
}
}
```
+> `groupPolicy` controls how the bot responds in group channels:
+> - `"mention"` (default) — Only respond when @mentioned
+> - `"open"` — Respond to all messages
+> DMs always respond when the sender is in `allowFrom`.
+
**5. Invite the bot**
- OAuth2 → URL Generator
- Scopes: `bot`
diff --git a/nanobot/channels/discord.py b/nanobot/channels/discord.py
index 57e5922..85ff28a 100644
--- a/nanobot/channels/discord.py
+++ b/nanobot/channels/discord.py
@@ -54,6 +54,7 @@ class DiscordChannel(BaseChannel):
self._heartbeat_task: asyncio.Task | None = None
self._typing_tasks: dict[str, asyncio.Task] = {}
self._http: httpx.AsyncClient | None = None
+ self._bot_user_id: str | None = None
async def start(self) -> None:
"""Start the Discord gateway connection."""
@@ -170,6 +171,10 @@ class DiscordChannel(BaseChannel):
await self._identify()
elif op == 0 and event_type == "READY":
logger.info("Discord gateway READY")
+ # Capture bot user ID for mention detection
+ user_data = payload.get("user") or {}
+ self._bot_user_id = user_data.get("id")
+ logger.info(f"Discord bot connected as user {self._bot_user_id}")
elif op == 0 and event_type == "MESSAGE_CREATE":
await self._handle_message_create(payload)
elif op == 7:
@@ -226,6 +231,7 @@ class DiscordChannel(BaseChannel):
sender_id = str(author.get("id", ""))
channel_id = str(payload.get("channel_id", ""))
content = payload.get("content") or ""
+ guild_id = payload.get("guild_id")
if not sender_id or not channel_id:
return
@@ -233,6 +239,11 @@ class DiscordChannel(BaseChannel):
if not self.is_allowed(sender_id):
return
+ # Check group channel policy (DMs always respond if is_allowed passes)
+ if guild_id is not None:
+ if not self._should_respond_in_group(payload, content):
+ return
+
content_parts = [content] if content else []
media_paths: list[str] = []
media_dir = Path.home() / ".nanobot" / "media"
@@ -269,11 +280,34 @@ class DiscordChannel(BaseChannel):
media=media_paths,
metadata={
"message_id": str(payload.get("id", "")),
- "guild_id": payload.get("guild_id"),
+ "guild_id": guild_id,
"reply_to": reply_to,
},
)
+ def _should_respond_in_group(self, payload: dict[str, Any], content: str) -> bool:
+ """Check if bot should respond in a group channel based on policy."""
+ channel_id = str(payload.get("channel_id", ""))
+
+ if self.config.group_policy == "open":
+ return True
+
+ if self.config.group_policy == "mention":
+ # Check if bot was mentioned in the message
+ if self._bot_user_id:
+ # Check mentions array
+ mentions = payload.get("mentions") or []
+ for mention in mentions:
+ if str(mention.get("id")) == self._bot_user_id:
+ return True
+ # Also check content for mention format <@USER_ID>
+ if f"<@{self._bot_user_id}>" in content or f"<@!{self._bot_user_id}>" in content:
+ return True
+ logger.debug(f"Discord message in {channel_id} ignored (bot not mentioned)")
+ return False
+
+ return True
+
async def _start_typing(self, channel_id: str) -> None:
"""Start periodic typing indicator for a channel."""
await self._stop_typing(channel_id)
diff --git a/nanobot/config/schema.py b/nanobot/config/schema.py
index 6b80c81..e3d3d23 100644
--- a/nanobot/config/schema.py
+++ b/nanobot/config/schema.py
@@ -62,6 +62,7 @@ class DiscordConfig(Base):
allow_from: list[str] = Field(default_factory=list) # Allowed user IDs
gateway_url: str = "wss://gateway.discord.gg/?v=10&encoding=json"
intents: int = 37377 # GUILDS + GUILD_MESSAGES + DIRECT_MESSAGES + MESSAGE_CONTENT
+ group_policy: str = "open" # "mention" or "open"
class MatrixConfig(Base):
From ecdfaf0a5a00b0772719aa306d4cb36d8512f9c7 Mon Sep 17 00:00:00 2001
From: David Markey
Date: Sun, 1 Mar 2026 20:49:00 +0000
Subject: [PATCH 04/26] feat(custom-provider): add x-session-affinity header
for prompt caching
---
nanobot/providers/custom_provider.py | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/nanobot/providers/custom_provider.py b/nanobot/providers/custom_provider.py
index 56e6270..02183f3 100644
--- a/nanobot/providers/custom_provider.py
+++ b/nanobot/providers/custom_provider.py
@@ -2,6 +2,7 @@
from __future__ import annotations
+import uuid
from typing import Any
import json_repair
@@ -15,7 +16,11 @@ class CustomProvider(LLMProvider):
def __init__(self, api_key: str = "no-key", api_base: str = "http://localhost:8000/v1", default_model: str = "default"):
super().__init__(api_key, api_base)
self.default_model = default_model
- self._client = AsyncOpenAI(api_key=api_key, base_url=api_base)
+ self._client = AsyncOpenAI(
+ api_key=api_key,
+ base_url=api_base,
+ default_headers={"x-session-affinity": uuid.uuid4().hex},
+ )
async def chat(self, messages: list[dict[str, Any]], tools: list[dict[str, Any]] | None = None,
model: str | None = None, max_tokens: int = 4096, temperature: float = 0.7,
From 8f4baaa5ce750fc073921fa29e734a0fe0da2056 Mon Sep 17 00:00:00 2001
From: chengyongru <2755839590@qq.com>
Date: Mon, 2 Mar 2026 22:01:02 +0800
Subject: [PATCH 05/26] feat(gateway): support multiple instances with
--workspace and --config options
- Add --workspace/-w flag to specify workspace directory
- Add --config/-c flag to specify config file path
- Move cron store to workspace directory for per-instance isolation
- Enable running multiple nanobot instances simultaneously
---
README.md | 27 +++++++++++++++++++++++++++
nanobot/cli/commands.py | 13 ++++++++++++-
2 files changed, 39 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 01da228..3022708 100644
--- a/README.md
+++ b/README.md
@@ -884,6 +884,33 @@ MCP tools are automatically discovered and registered on startup. The LLM can us
| `channels.*.allowFrom` | `[]` (allow all) | Whitelist of user IDs. Empty = allow everyone; non-empty = only listed users can interact. |
+## Multiple Instances
+
+Run multiple nanobot instances simultaneously, each with its own workspace and configuration.
+
+```bash
+# Instance A - Telegram bot
+nanobot gateway -w ~/.nanobot/botA -p 18791
+
+# Instance B - Discord bot
+nanobot gateway -w ~/.nanobot/botB -p 18792
+
+# Instance C - Using custom config file
+nanobot gateway -w ~/.nanobot/botC -c ~/.nanobot/botC/config.json -p 18793
+```
+
+| Option | Short | Description |
+|--------|-------|-------------|
+| `--workspace` | `-w` | Workspace directory (default: `~/.nanobot/workspace`) |
+| `--config` | `-c` | Config file path (default: `~/.nanobot/config.json`) |
+| `--port` | `-p` | Gateway port (default: `18790`) |
+
+Each instance has its own:
+- Workspace directory (MEMORY.md, HEARTBEAT.md, session files)
+- Cron jobs storage (`workspace/cron/jobs.json`)
+- Configuration (if using `--config`)
+
+
## CLI Reference
| Command | Description |
diff --git a/nanobot/cli/commands.py b/nanobot/cli/commands.py
index 2662e9f..e599b11 100644
--- a/nanobot/cli/commands.py
+++ b/nanobot/cli/commands.py
@@ -244,6 +244,8 @@ def _make_provider(config: Config):
@app.command()
def gateway(
port: int = typer.Option(18790, "--port", "-p", help="Gateway port"),
+ workspace: str | None = typer.Option(None, "--workspace", "-w", help="Workspace directory (default: ~/.nanobot/workspace)"),
+ config: str | None = typer.Option(None, "--config", "-c", help="Config file path"),
verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"),
):
"""Start the nanobot gateway."""
@@ -260,6 +262,14 @@ def gateway(
import logging
logging.basicConfig(level=logging.DEBUG)
+ # Load config from custom path if provided, otherwise use default
+ config_path = Path(config) if config else None
+ config = load_config(config_path)
+
+ # Override workspace if specified via command line
+ if workspace:
+ config.agents.defaults.workspace = workspace
+
console.print(f"{__logo__} Starting nanobot gateway on port {port}...")
config = load_config()
@@ -269,7 +279,8 @@ def gateway(
session_manager = SessionManager(config.workspace_path)
# Create cron service first (callback set after agent creation)
- cron_store_path = get_data_dir() / "cron" / "jobs.json"
+ # Use workspace path for per-instance cron store
+ cron_store_path = config.workspace_path / "cron" / "jobs.json"
cron = CronService(cron_store_path)
# Create agent with cron service
From 5f7fb9c75ad1d3d442d4236607c827ad97a132fd Mon Sep 17 00:00:00 2001
From: cocolato
Date: Tue, 3 Mar 2026 23:40:56 +0800
Subject: [PATCH 06/26] add missing dependency
---
pyproject.toml | 1 +
1 file changed, 1 insertion(+)
diff --git a/pyproject.toml b/pyproject.toml
index a22053c..4199af1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -42,6 +42,7 @@ dependencies = [
"prompt-toolkit>=3.0.50,<4.0.0",
"mcp>=1.26.0,<2.0.0",
"json-repair>=0.57.0,<1.0.0",
+ "openai>=2.8.0",
]
[project.optional-dependencies]
From 3e83425142334c6d712c210ac73254488f749150 Mon Sep 17 00:00:00 2001
From: worenidewen
Date: Wed, 4 Mar 2026 01:06:04 +0800
Subject: [PATCH 07/26] feat(mcp): add SSE transport support with
auto-detection
---
nanobot/agent/tools/mcp.py | 37 ++++++++++++++++++--
nanobot/config/schema.py | 72 ++++++++++++++++++++++++++------------
2 files changed, 83 insertions(+), 26 deletions(-)
diff --git a/nanobot/agent/tools/mcp.py b/nanobot/agent/tools/mcp.py
index 37464e1..151aa55 100644
--- a/nanobot/agent/tools/mcp.py
+++ b/nanobot/agent/tools/mcp.py
@@ -62,12 +62,43 @@ async def connect_mcp_servers(
for name, cfg in mcp_servers.items():
try:
- if cfg.command:
+ transport_type = cfg.type
+ if not transport_type:
+ if cfg.command:
+ transport_type = "stdio"
+ elif cfg.url:
+ transport_type = (
+ "sse" if cfg.url.rstrip("/").endswith("/sse") else "streamableHttp"
+ )
+ else:
+ logger.warning("MCP server '{}': no command or url configured, skipping", name)
+ continue
+
+ if transport_type == "stdio":
params = StdioServerParameters(
command=cfg.command, args=cfg.args, env=cfg.env or None
)
read, write = await stack.enter_async_context(stdio_client(params))
- elif cfg.url:
+ elif transport_type == "sse":
+ from mcp.client.sse import sse_client
+
+ def httpx_client_factory(
+ headers: dict[str, str] | None = None,
+ timeout: httpx.Timeout | None = None,
+ auth: httpx.Auth | None = None,
+ ) -> httpx.AsyncClient:
+ merged_headers = {**(cfg.headers or {}), **(headers or {})}
+ return httpx.AsyncClient(
+ headers=merged_headers or None,
+ follow_redirects=True,
+ timeout=timeout,
+ auth=auth,
+ )
+
+ read, write = await stack.enter_async_context(
+ sse_client(cfg.url, httpx_client_factory=httpx_client_factory)
+ )
+ elif transport_type == "streamableHttp":
from mcp.client.streamable_http import streamable_http_client
# Always provide an explicit httpx client so MCP HTTP transport does not
# inherit httpx's default 5s timeout and preempt the higher-level tool timeout.
@@ -82,7 +113,7 @@ async def connect_mcp_servers(
streamable_http_client(cfg.url, http_client=http_client)
)
else:
- logger.warning("MCP server '{}': no command or url configured, skipping", name)
+ logger.warning("MCP server '{}': unknown transport type '{}'", name, transport_type)
continue
session = await stack.enter_async_context(ClientSession(read, write))
diff --git a/nanobot/config/schema.py b/nanobot/config/schema.py
index 61a7bd2..64e60dc 100644
--- a/nanobot/config/schema.py
+++ b/nanobot/config/schema.py
@@ -29,7 +29,9 @@ class TelegramConfig(Base):
enabled: bool = False
token: str = "" # Bot token from @BotFather
allow_from: list[str] = Field(default_factory=list) # Allowed user IDs or usernames
- proxy: str | None = None # HTTP/SOCKS5 proxy URL, e.g. "http://127.0.0.1:7890" or "socks5://127.0.0.1:1080"
+ proxy: str | None = (
+ None # HTTP/SOCKS5 proxy URL, e.g. "http://127.0.0.1:7890" or "socks5://127.0.0.1:1080"
+ )
reply_to_message: bool = False # If true, bot replies quote the original message
@@ -42,7 +44,9 @@ class FeishuConfig(Base):
encrypt_key: str = "" # Encrypt Key for event subscription (optional)
verification_token: str = "" # Verification Token for event subscription (optional)
allow_from: list[str] = Field(default_factory=list) # Allowed user open_ids
- react_emoji: str = "THUMBSUP" # Emoji type for message reactions (e.g. THUMBSUP, OK, DONE, SMILE)
+ react_emoji: str = (
+ "THUMBSUP" # Emoji type for message reactions (e.g. THUMBSUP, OK, DONE, SMILE)
+ )
class DingTalkConfig(Base):
@@ -72,9 +76,13 @@ class MatrixConfig(Base):
access_token: str = ""
user_id: str = "" # @bot:matrix.org
device_id: str = ""
- e2ee_enabled: bool = True # Enable Matrix E2EE support (encryption + encrypted room handling).
- sync_stop_grace_seconds: int = 2 # Max seconds to wait for sync_forever to stop gracefully before cancellation fallback.
- max_media_bytes: int = 20 * 1024 * 1024 # Max attachment size accepted for Matrix media handling (inbound + outbound).
+ e2ee_enabled: bool = True # Enable Matrix E2EE support (encryption + encrypted room handling).
+ sync_stop_grace_seconds: int = (
+ 2 # Max seconds to wait for sync_forever to stop gracefully before cancellation fallback.
+ )
+ max_media_bytes: int = (
+ 20 * 1024 * 1024
+ ) # Max attachment size accepted for Matrix media handling (inbound + outbound).
allow_from: list[str] = Field(default_factory=list)
group_policy: Literal["open", "mention", "allowlist"] = "open"
group_allow_from: list[str] = Field(default_factory=list)
@@ -105,7 +113,9 @@ class EmailConfig(Base):
from_address: str = ""
# Behavior
- auto_reply_enabled: bool = True # If false, inbound email is read but no automatic reply is sent
+ auto_reply_enabled: bool = (
+ True # If false, inbound email is read but no automatic reply is sent
+ )
poll_interval_seconds: int = 30
mark_seen: bool = True
max_body_chars: int = 12000
@@ -183,27 +193,32 @@ class QQConfig(Base):
enabled: bool = False
app_id: str = "" # 机器人 ID (AppID) from q.qq.com
secret: str = "" # 机器人密钥 (AppSecret) from q.qq.com
- allow_from: list[str] = Field(default_factory=list) # Allowed user openids (empty = public access)
+ allow_from: list[str] = Field(
+ default_factory=list
+ ) # Allowed user openids (empty = public access)
+
class MatrixConfig(Base):
"""Matrix (Element) channel configuration."""
+
enabled: bool = False
homeserver: str = "https://matrix.org"
access_token: str = ""
- user_id: str = "" # e.g. @bot:matrix.org
+ user_id: str = "" # e.g. @bot:matrix.org
device_id: str = ""
- e2ee_enabled: bool = True # end-to-end encryption support
- sync_stop_grace_seconds: int = 2 # graceful sync_forever shutdown timeout
- max_media_bytes: int = 20 * 1024 * 1024 # inbound + outbound attachment limit
+ e2ee_enabled: bool = True # end-to-end encryption support
+ sync_stop_grace_seconds: int = 2 # graceful sync_forever shutdown timeout
+ max_media_bytes: int = 20 * 1024 * 1024 # inbound + outbound attachment limit
allow_from: list[str] = Field(default_factory=list)
group_policy: Literal["open", "mention", "allowlist"] = "open"
group_allow_from: list[str] = Field(default_factory=list)
allow_room_mentions: bool = False
+
class ChannelsConfig(Base):
"""Configuration for chat channels."""
- send_progress: bool = True # stream agent's text progress to the channel
+ send_progress: bool = True # stream agent's text progress to the channel
send_tool_hints: bool = False # stream tool-call hints (e.g. read_file("…"))
whatsapp: WhatsAppConfig = Field(default_factory=WhatsAppConfig)
telegram: TelegramConfig = Field(default_factory=TelegramConfig)
@@ -222,7 +237,9 @@ class AgentDefaults(Base):
workspace: str = "~/.nanobot/workspace"
model: str = "anthropic/claude-opus-4-5"
- provider: str = "auto" # Provider name (e.g. "anthropic", "openrouter") or "auto" for auto-detection
+ provider: str = (
+ "auto" # Provider name (e.g. "anthropic", "openrouter") or "auto" for auto-detection
+ )
max_tokens: int = 8192
temperature: float = 0.1
max_tool_iterations: int = 40
@@ -260,8 +277,12 @@ class ProvidersConfig(Base):
moonshot: ProviderConfig = Field(default_factory=ProviderConfig)
minimax: ProviderConfig = Field(default_factory=ProviderConfig)
aihubmix: ProviderConfig = Field(default_factory=ProviderConfig) # AiHubMix API gateway
- siliconflow: ProviderConfig = Field(default_factory=ProviderConfig) # SiliconFlow (硅基流动) API gateway
- volcengine: ProviderConfig = Field(default_factory=ProviderConfig) # VolcEngine (火山引擎) API gateway
+ siliconflow: ProviderConfig = Field(
+ default_factory=ProviderConfig
+ ) # SiliconFlow (硅基流动) API gateway
+ volcengine: ProviderConfig = Field(
+ default_factory=ProviderConfig
+ ) # VolcEngine (火山引擎) API gateway
openai_codex: ProviderConfig = Field(default_factory=ProviderConfig) # OpenAI Codex (OAuth)
github_copilot: ProviderConfig = Field(default_factory=ProviderConfig) # Github Copilot (OAuth)
@@ -291,7 +312,9 @@ class WebSearchConfig(Base):
class WebToolsConfig(Base):
"""Web tools configuration."""
- proxy: str | None = None # HTTP/SOCKS5 proxy URL, e.g. "http://127.0.0.1:7890" or "socks5://127.0.0.1:1080"
+ proxy: str | None = (
+ None # HTTP/SOCKS5 proxy URL, e.g. "http://127.0.0.1:7890" or "socks5://127.0.0.1:1080"
+ )
search: WebSearchConfig = Field(default_factory=WebSearchConfig)
@@ -305,12 +328,13 @@ class ExecToolConfig(Base):
class MCPServerConfig(Base):
"""MCP server connection configuration (stdio or HTTP)."""
- command: str = "" # Stdio: command to run (e.g. "npx")
- args: list[str] = Field(default_factory=list) # Stdio: command arguments
- env: dict[str, str] = Field(default_factory=dict) # Stdio: extra env vars
- url: str = "" # HTTP: streamable HTTP endpoint URL
- headers: dict[str, str] = Field(default_factory=dict) # HTTP: Custom HTTP Headers
- tool_timeout: int = 30 # Seconds before a tool call is cancelled
+ type: Literal["stdio", "sse", "streamableHttp"] | None = None
+ command: str = ""
+ args: list[str] = Field(default_factory=list)
+ env: dict[str, str] = Field(default_factory=dict)
+ url: str = ""
+ headers: dict[str, str] = Field(default_factory=dict)
+ tool_timeout: int = 30
class ToolsConfig(Base):
@@ -336,7 +360,9 @@ class Config(BaseSettings):
"""Get expanded workspace path."""
return Path(self.agents.defaults.workspace).expanduser()
- def _match_provider(self, model: str | None = None) -> tuple["ProviderConfig | None", str | None]:
+ def _match_provider(
+ self, model: str | None = None
+ ) -> tuple["ProviderConfig | None", str | None]:
"""Match provider config and its registry name. Returns (config, spec_name)."""
from nanobot.providers.registry import PROVIDERS
From d0a48ed23c7eb578702f9dd5e7d4dc009d022efa Mon Sep 17 00:00:00 2001
From: Liwx
Date: Wed, 4 Mar 2026 14:00:40 +0800
Subject: [PATCH 08/26] Update qq.py
---
nanobot/channels/qq.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/nanobot/channels/qq.py b/nanobot/channels/qq.py
index 7b171bc..99a712b 100644
--- a/nanobot/channels/qq.py
+++ b/nanobot/channels/qq.py
@@ -56,6 +56,7 @@ class QQChannel(BaseChannel):
self.config: QQConfig = config
self._client: "botpy.Client | None" = None
self._processed_ids: deque = deque(maxlen=1000)
+ self._msg_seq: int = 1 # Message sequence to avoid QQ API deduplication
async def start(self) -> None:
"""Start the QQ bot."""
From 20bec3bc266ef84399d3170cef6b4b5de8627f67 Mon Sep 17 00:00:00 2001
From: Liwx
Date: Wed, 4 Mar 2026 14:06:19 +0800
Subject: [PATCH 09/26] Update qq.py
---
nanobot/channels/qq.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/nanobot/channels/qq.py b/nanobot/channels/qq.py
index 99a712b..6c58049 100644
--- a/nanobot/channels/qq.py
+++ b/nanobot/channels/qq.py
@@ -56,7 +56,7 @@ class QQChannel(BaseChannel):
self.config: QQConfig = config
self._client: "botpy.Client | None" = None
self._processed_ids: deque = deque(maxlen=1000)
- self._msg_seq: int = 1 # Message sequence to avoid QQ API deduplication
+ self._msg_seq: int = 1 # 消息序列号,避免被 QQ API 去重
async def start(self) -> None:
"""Start the QQ bot."""
@@ -103,11 +103,13 @@ class QQChannel(BaseChannel):
return
try:
msg_id = msg.metadata.get("message_id")
+ self._msg_seq += 1 # 递增序列号
await self._client.api.post_c2c_message(
openid=msg.chat_id,
msg_type=0,
content=msg.content,
msg_id=msg_id,
+ msg_seq=self._msg_seq, # 添加序列号避免去重
)
except Exception as e:
logger.error("Error sending QQ message: {}", e)
@@ -134,3 +136,4 @@ class QQChannel(BaseChannel):
)
except Exception:
logger.exception("Error handling QQ message")
+
From df8d09f2b6c0eb23298e41acbe139fad9d38f325 Mon Sep 17 00:00:00 2001
From: Kiplangatkorir
Date: Wed, 4 Mar 2026 10:53:30 +0300
Subject: [PATCH 10/26] fix: guard validate_params against non-dict input
When the LLM returns malformed tool arguments (e.g. a list or string
instead of a dict), validate_params would crash with AttributeError
in _validate() when calling val.items(). Now returns a clear
validation error instead of crashing.
---
nanobot/agent/tools/base.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/nanobot/agent/tools/base.py b/nanobot/agent/tools/base.py
index 8dd82c7..051fc9a 100644
--- a/nanobot/agent/tools/base.py
+++ b/nanobot/agent/tools/base.py
@@ -54,6 +54,8 @@ class Tool(ABC):
def validate_params(self, params: dict[str, Any]) -> list[str]:
"""Validate tool parameters against JSON schema. Returns error list (empty if valid)."""
+ if not isinstance(params, dict):
+ return [f"parameters must be an object, got {type(params).__name__}"]
schema = self.parameters or {}
if schema.get("type", "object") != "object":
raise ValueError(f"Schema must be object type, got {schema.get('type')!r}")
From edaf7a244a0d65395cab954fc768dc8031489b29 Mon Sep 17 00:00:00 2001
From: Kiplangatkorir
Date: Wed, 4 Mar 2026 10:55:17 +0300
Subject: [PATCH 11/26] fix: handle invalid ISO datetime in CronTool gracefully
datetime.fromisoformat(at) raises ValueError for malformed strings,
which propagated uncaught and crashed the tool execution. Now catches
ValueError and returns a user-friendly error message instead.
---
nanobot/agent/tools/cron.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/nanobot/agent/tools/cron.py b/nanobot/agent/tools/cron.py
index 13b1e12..f8e737b 100644
--- a/nanobot/agent/tools/cron.py
+++ b/nanobot/agent/tools/cron.py
@@ -122,7 +122,10 @@ class CronTool(Tool):
elif at:
from datetime import datetime
- dt = datetime.fromisoformat(at)
+ try:
+ dt = datetime.fromisoformat(at)
+ except ValueError:
+ return f"Error: invalid ISO datetime format '{at}'. Expected format: YYYY-MM-DDTHH:MM:SS"
at_ms = int(dt.timestamp() * 1000)
schedule = CronSchedule(kind="at", at_ms=at_ms)
delete_after = True
From ce65f8c11be13b51f242890cabdf15f4e0d1b12a Mon Sep 17 00:00:00 2001
From: Kiplangatkorir
Date: Wed, 4 Mar 2026 11:15:45 +0300
Subject: [PATCH 12/26] fix: add size limit to ReadFileTool to prevent OOM
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
ReadFileTool had no file size check — reading a multi-GB file would
load everything into memory and crash the process. Now:
- Rejects files over ~512KB at the byte level (fast stat check)
- Truncates at 128K chars with a notice if content is too long
- Guides the agent to use exec with head/tail/grep for large files
This matches the protection already in ExecTool (10KB) and
WebFetchTool (50KB).
---
nanobot/agent/tools/filesystem.py | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/nanobot/agent/tools/filesystem.py b/nanobot/agent/tools/filesystem.py
index bbdd49c..7b0b867 100644
--- a/nanobot/agent/tools/filesystem.py
+++ b/nanobot/agent/tools/filesystem.py
@@ -26,6 +26,8 @@ def _resolve_path(
class ReadFileTool(Tool):
"""Tool to read file contents."""
+ _MAX_CHARS = 128_000 # ~128 KB — prevents OOM from reading huge files into LLM context
+
def __init__(self, workspace: Path | None = None, allowed_dir: Path | None = None):
self._workspace = workspace
self._allowed_dir = allowed_dir
@@ -54,7 +56,16 @@ class ReadFileTool(Tool):
if not file_path.is_file():
return f"Error: Not a file: {path}"
+ size = file_path.stat().st_size
+ if size > self._MAX_CHARS * 4: # rough upper bound (UTF-8 chars ≤ 4 bytes)
+ return (
+ f"Error: File too large ({size:,} bytes). "
+ f"Use exec tool with head/tail/grep to read portions."
+ )
+
content = file_path.read_text(encoding="utf-8")
+ if len(content) > self._MAX_CHARS:
+ return content[: self._MAX_CHARS] + f"\n\n... (truncated — file is {len(content):,} chars, limit {self._MAX_CHARS:,})"
return content
except PermissionError as e:
return f"Error: {e}"
From bb8512ca842fc3b14c6dee01c5aaf9e241f8344e Mon Sep 17 00:00:00 2001
From: chengyongru <2755839590@qq.com>
Date: Wed, 4 Mar 2026 20:42:49 +0800
Subject: [PATCH 13/26] test: fix test failures from refactored cron and
context builder
- test_context_prompt_cache: Update test to reflect merged runtime
context and user message (commit ad99d5a merged them into one)
- Remove test_cron_commands.py: cron add CLI command was removed
in commit c05cb2e (unified scheduling via cron tool)
---
tests/test_context_prompt_cache.py | 19 +++++++++----------
tests/test_cron_commands.py | 29 -----------------------------
2 files changed, 9 insertions(+), 39 deletions(-)
delete mode 100644 tests/test_cron_commands.py
diff --git a/tests/test_context_prompt_cache.py b/tests/test_context_prompt_cache.py
index 9afcc7d..ce796e2 100644
--- a/tests/test_context_prompt_cache.py
+++ b/tests/test_context_prompt_cache.py
@@ -40,7 +40,7 @@ def test_system_prompt_stays_stable_when_clock_changes(tmp_path, monkeypatch) ->
def test_runtime_context_is_separate_untrusted_user_message(tmp_path) -> None:
- """Runtime metadata should be a separate user message before the actual user message."""
+ """Runtime metadata should be merged with the user message."""
workspace = _make_workspace(tmp_path)
builder = ContextBuilder(workspace)
@@ -54,13 +54,12 @@ def test_runtime_context_is_separate_untrusted_user_message(tmp_path) -> None:
assert messages[0]["role"] == "system"
assert "## Current Session" not in messages[0]["content"]
- assert messages[-2]["role"] == "user"
- runtime_content = messages[-2]["content"]
- assert isinstance(runtime_content, str)
- assert ContextBuilder._RUNTIME_CONTEXT_TAG in runtime_content
- assert "Current Time:" in runtime_content
- assert "Channel: cli" in runtime_content
- assert "Chat ID: direct" in runtime_content
-
+ # Runtime context is now merged with user message into a single message
assert messages[-1]["role"] == "user"
- assert messages[-1]["content"] == "Return exactly: OK"
+ user_content = messages[-1]["content"]
+ assert isinstance(user_content, str)
+ assert ContextBuilder._RUNTIME_CONTEXT_TAG in user_content
+ assert "Current Time:" in user_content
+ assert "Channel: cli" in user_content
+ assert "Chat ID: direct" in user_content
+ assert "Return exactly: OK" in user_content
diff --git a/tests/test_cron_commands.py b/tests/test_cron_commands.py
deleted file mode 100644
index bce1ef5..0000000
--- a/tests/test_cron_commands.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from typer.testing import CliRunner
-
-from nanobot.cli.commands import app
-
-runner = CliRunner()
-
-
-def test_cron_add_rejects_invalid_timezone(monkeypatch, tmp_path) -> None:
- monkeypatch.setattr("nanobot.config.loader.get_data_dir", lambda: tmp_path)
-
- result = runner.invoke(
- app,
- [
- "cron",
- "add",
- "--name",
- "demo",
- "--message",
- "hello",
- "--cron",
- "0 9 * * *",
- "--tz",
- "America/Vancovuer",
- ],
- )
-
- assert result.exit_code == 1
- assert "Error: unknown timezone 'America/Vancovuer'" in result.stdout
- assert not (tmp_path / "cron" / "jobs.json").exists()
From ecdf30940459a27311855a97cfdb7599cb3f89a2 Mon Sep 17 00:00:00 2001
From: Daniel Emden
Date: Wed, 4 Mar 2026 15:31:56 +0100
Subject: [PATCH 14/26] fix(codex): pass reasoning_effort to Codex API
The OpenAI Codex provider accepts reasoning_effort but silently
discards it. Wire it through as {"reasoning": {"effort": ...}} in
the request body so the config option actually takes effect.
---
nanobot/providers/openai_codex_provider.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/nanobot/providers/openai_codex_provider.py b/nanobot/providers/openai_codex_provider.py
index b6afa65..d04e210 100644
--- a/nanobot/providers/openai_codex_provider.py
+++ b/nanobot/providers/openai_codex_provider.py
@@ -52,6 +52,9 @@ class OpenAICodexProvider(LLMProvider):
"parallel_tool_calls": True,
}
+ if reasoning_effort:
+ body["reasoning"] = {"effort": reasoning_effort}
+
if tools:
body["tools"] = _convert_tools(tools)
From c64fe0afd8cfcbfe0c26569140db33b473f87854 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergio=20S=C3=A1nchez=20Vall=C3=A9s?=
Date: Wed, 4 Mar 2026 16:53:07 +0100
Subject: [PATCH 15/26] fix(tests): resolve failing tests on main branch
- Unskip matrix logic by adding missing deps (matrix-nio, nh3, mistune)
- Update matrix tests for 'allow_from' default deny security change
- Fix asyncio typing keepalive leak in matrix tests
- Update context prompt cache assert after runtime message merge
- Fix flaky cron service test with mtime sleep
- Remove obsolete test_cron_commands.py testing deleted CLI commands
---
pyproject.toml | 3 +++
tests/test_context_prompt_cache.py | 9 ++++-----
tests/test_cron_commands.py | 29 -----------------------------
tests/test_cron_service.py | 2 ++
tests/test_matrix_channel.py | 20 ++++++++++++++++++--
5 files changed, 27 insertions(+), 36 deletions(-)
delete mode 100644 tests/test_cron_commands.py
diff --git a/pyproject.toml b/pyproject.toml
index a22053c..0546523 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -54,6 +54,9 @@ dev = [
"pytest>=9.0.0,<10.0.0",
"pytest-asyncio>=1.3.0,<2.0.0",
"ruff>=0.1.0",
+ "matrix-nio[e2e]>=0.25.2",
+ "mistune>=3.0.0,<4.0.0",
+ "nh3>=0.2.17,<1.0.0",
]
[project.scripts]
diff --git a/tests/test_context_prompt_cache.py b/tests/test_context_prompt_cache.py
index 9afcc7d..38b8d35 100644
--- a/tests/test_context_prompt_cache.py
+++ b/tests/test_context_prompt_cache.py
@@ -54,13 +54,12 @@ def test_runtime_context_is_separate_untrusted_user_message(tmp_path) -> None:
assert messages[0]["role"] == "system"
assert "## Current Session" not in messages[0]["content"]
- assert messages[-2]["role"] == "user"
- runtime_content = messages[-2]["content"]
+ assert len(messages) == 2
+ assert messages[-1]["role"] == "user"
+ runtime_content = messages[-1]["content"]
assert isinstance(runtime_content, str)
assert ContextBuilder._RUNTIME_CONTEXT_TAG in runtime_content
assert "Current Time:" in runtime_content
assert "Channel: cli" in runtime_content
assert "Chat ID: direct" in runtime_content
-
- assert messages[-1]["role"] == "user"
- assert messages[-1]["content"] == "Return exactly: OK"
+ assert "Return exactly: OK" in runtime_content
diff --git a/tests/test_cron_commands.py b/tests/test_cron_commands.py
deleted file mode 100644
index bce1ef5..0000000
--- a/tests/test_cron_commands.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from typer.testing import CliRunner
-
-from nanobot.cli.commands import app
-
-runner = CliRunner()
-
-
-def test_cron_add_rejects_invalid_timezone(monkeypatch, tmp_path) -> None:
- monkeypatch.setattr("nanobot.config.loader.get_data_dir", lambda: tmp_path)
-
- result = runner.invoke(
- app,
- [
- "cron",
- "add",
- "--name",
- "demo",
- "--message",
- "hello",
- "--cron",
- "0 9 * * *",
- "--tz",
- "America/Vancovuer",
- ],
- )
-
- assert result.exit_code == 1
- assert "Error: unknown timezone 'America/Vancovuer'" in result.stdout
- assert not (tmp_path / "cron" / "jobs.json").exists()
diff --git a/tests/test_cron_service.py b/tests/test_cron_service.py
index 2a36f4c..9631da5 100644
--- a/tests/test_cron_service.py
+++ b/tests/test_cron_service.py
@@ -48,6 +48,8 @@ async def test_running_service_honors_external_disable(tmp_path) -> None:
)
await service.start()
try:
+ # Wait slightly to ensure file mtime is definitively different
+ await asyncio.sleep(0.05)
external = CronService(store_path)
updated = external.enable_job(job.id, enabled=False)
assert updated is not None
diff --git a/tests/test_matrix_channel.py b/tests/test_matrix_channel.py
index c6714c2..c25b95a 100644
--- a/tests/test_matrix_channel.py
+++ b/tests/test_matrix_channel.py
@@ -159,6 +159,7 @@ class _FakeAsyncClient:
def _make_config(**kwargs) -> MatrixConfig:
+ kwargs.setdefault("allow_from", ["*"])
return MatrixConfig(
enabled=True,
homeserver="https://matrix.org",
@@ -274,7 +275,7 @@ async def test_stop_stops_sync_forever_before_close(monkeypatch) -> None:
@pytest.mark.asyncio
-async def test_room_invite_joins_when_allow_list_is_empty() -> None:
+async def test_room_invite_ignores_when_allow_list_is_empty() -> None:
channel = MatrixChannel(_make_config(allow_from=[]), MessageBus())
client = _FakeAsyncClient("", "", "", None)
channel.client = client
@@ -284,9 +285,22 @@ async def test_room_invite_joins_when_allow_list_is_empty() -> None:
await channel._on_room_invite(room, event)
- assert client.join_calls == ["!room:matrix.org"]
+ assert client.join_calls == []
+@pytest.mark.asyncio
+async def test_room_invite_joins_when_sender_allowed() -> None:
+ channel = MatrixChannel(_make_config(allow_from=["@alice:matrix.org"]), MessageBus())
+ client = _FakeAsyncClient("", "", "", None)
+ channel.client = client
+
+ room = SimpleNamespace(room_id="!room:matrix.org")
+ event = SimpleNamespace(sender="@alice:matrix.org")
+
+ await channel._on_room_invite(room, event)
+
+ assert client.join_calls == ["!room:matrix.org"]
+
@pytest.mark.asyncio
async def test_room_invite_respects_allow_list_when_configured() -> None:
channel = MatrixChannel(_make_config(allow_from=["@bob:matrix.org"]), MessageBus())
@@ -1163,6 +1177,8 @@ async def test_send_progress_keeps_typing_keepalive_running() -> None:
assert "!room:matrix.org" in channel._typing_tasks
assert client.typing_calls[-1] == ("!room:matrix.org", True, TYPING_NOTICE_TIMEOUT_MS)
+ await channel.stop()
+
@pytest.mark.asyncio
async def test_send_clears_typing_when_send_fails() -> None:
From 88d7642c1ec570e07eef473f47d1d637b38b9b07 Mon Sep 17 00:00:00 2001
From: chengyongru <2755839590@qq.com>
Date: Wed, 4 Mar 2026 20:42:49 +0800
Subject: [PATCH 16/26] test: fix test failures from refactored cron and
context builder
- test_context_prompt_cache: Update test to reflect merged runtime
context and user message (commit ad99d5a merged them into one)
- Remove test_cron_commands.py: cron add CLI command was removed
in commit c05cb2e (unified scheduling via cron tool)
---
tests/test_context_prompt_cache.py | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/tests/test_context_prompt_cache.py b/tests/test_context_prompt_cache.py
index 38b8d35..fa7f02d 100644
--- a/tests/test_context_prompt_cache.py
+++ b/tests/test_context_prompt_cache.py
@@ -40,7 +40,7 @@ def test_system_prompt_stays_stable_when_clock_changes(tmp_path, monkeypatch) ->
def test_runtime_context_is_separate_untrusted_user_message(tmp_path) -> None:
- """Runtime metadata should be a separate user message before the actual user message."""
+ """Runtime metadata should be merged with the user message."""
workspace = _make_workspace(tmp_path)
builder = ContextBuilder(workspace)
@@ -55,11 +55,12 @@ def test_runtime_context_is_separate_untrusted_user_message(tmp_path) -> None:
assert "## Current Session" not in messages[0]["content"]
assert len(messages) == 2
+ # Runtime context is now merged with user message into a single message
assert messages[-1]["role"] == "user"
- runtime_content = messages[-1]["content"]
- assert isinstance(runtime_content, str)
- assert ContextBuilder._RUNTIME_CONTEXT_TAG in runtime_content
- assert "Current Time:" in runtime_content
- assert "Channel: cli" in runtime_content
- assert "Chat ID: direct" in runtime_content
- assert "Return exactly: OK" in runtime_content
+ user_content = messages[-1]["content"]
+ assert isinstance(user_content, str)
+ assert ContextBuilder._RUNTIME_CONTEXT_TAG in user_content
+ assert "Current Time:" in user_content
+ assert "Channel: cli" in user_content
+ assert "Chat ID: direct" in user_content
+ assert "Return exactly: OK" in user_content
From bdfe7d6449dab772f681b857ad76796c92b63d05 Mon Sep 17 00:00:00 2001
From: Ben
Date: Thu, 5 Mar 2026 00:16:31 +0800
Subject: [PATCH 17/26] fix(feishu): convert audio type to file for API
compatibility
Feishu's GetMessageResource API only accepts 'image' or 'file' as the
type parameter. When downloading voice messages, nanobot was passing
'audio' which caused the API to reject the request with an error.
This fix converts 'audio' to 'file' in _download_file_sync method
before making the API call, allowing voice messages to be downloaded
and transcribed successfully.
Fixes voice message download failure in Feishu channel.
---
nanobot/channels/feishu.py | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/nanobot/channels/feishu.py b/nanobot/channels/feishu.py
index 0a0a5e4..a9a32b2 100644
--- a/nanobot/channels/feishu.py
+++ b/nanobot/channels/feishu.py
@@ -530,6 +530,10 @@ class FeishuChannel(BaseChannel):
self, message_id: str, file_key: str, resource_type: str = "file"
) -> tuple[bytes | None, str | None]:
"""Download a file/audio/media from a Feishu message by message_id and file_key."""
+ # Feishu API only accepts 'image' or 'file' as type parameter
+ # Convert 'audio' to 'file' for API compatibility
+ if resource_type == "audio":
+ resource_type = "file"
try:
request = (
GetMessageResourceRequest.builder()
From 0209ad57d9655d8fea5f5e551a4bb89bd0f1691c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergio=20S=C3=A1nchez=20Vall=C3=A9s?=
Date: Wed, 4 Mar 2026 19:31:39 +0100
Subject: [PATCH 18/26] fix(tests): resolve RequestsDependencyWarning and
lark-oapi asyncio/websockets DeprecationWarnings
---
nanobot/channels/feishu.py | 32 +++++++++++---------------------
pyproject.toml | 1 +
2 files changed, 12 insertions(+), 21 deletions(-)
diff --git a/nanobot/channels/feishu.py b/nanobot/channels/feishu.py
index 0a0a5e4..7d26fa8 100644
--- a/nanobot/channels/feishu.py
+++ b/nanobot/channels/feishu.py
@@ -16,26 +16,9 @@ from nanobot.bus.queue import MessageBus
from nanobot.channels.base import BaseChannel
from nanobot.config.schema import FeishuConfig
-try:
- import lark_oapi as lark
- from lark_oapi.api.im.v1 import (
- CreateFileRequest,
- CreateFileRequestBody,
- CreateImageRequest,
- CreateImageRequestBody,
- CreateMessageReactionRequest,
- CreateMessageReactionRequestBody,
- CreateMessageRequest,
- CreateMessageRequestBody,
- Emoji,
- GetMessageResourceRequest,
- P2ImMessageReceiveV1,
- )
- FEISHU_AVAILABLE = True
-except ImportError:
- FEISHU_AVAILABLE = False
- lark = None
- Emoji = None
+import importlib.util
+
+FEISHU_AVAILABLE = importlib.util.find_spec("lark_oapi") is not None
# Message type display mapping
MSG_TYPE_MAP = {
@@ -280,6 +263,7 @@ class FeishuChannel(BaseChannel):
logger.error("Feishu app_id and app_secret not configured")
return
+ import lark_oapi as lark
self._running = True
self._loop = asyncio.get_running_loop()
@@ -340,6 +324,7 @@ class FeishuChannel(BaseChannel):
def _add_reaction_sync(self, message_id: str, emoji_type: str) -> None:
"""Sync helper for adding reaction (runs in thread pool)."""
+ from lark_oapi.api.im.v1 import CreateMessageReactionRequest, CreateMessageReactionRequestBody, Emoji
try:
request = CreateMessageReactionRequest.builder() \
.message_id(message_id) \
@@ -364,7 +349,7 @@ class FeishuChannel(BaseChannel):
Common emoji types: THUMBSUP, OK, EYES, DONE, OnIt, HEART
"""
- if not self._client or not Emoji:
+ if not self._client:
return
loop = asyncio.get_running_loop()
@@ -456,6 +441,7 @@ class FeishuChannel(BaseChannel):
def _upload_image_sync(self, file_path: str) -> str | None:
"""Upload an image to Feishu and return the image_key."""
+ from lark_oapi.api.im.v1 import CreateImageRequest, CreateImageRequestBody
try:
with open(file_path, "rb") as f:
request = CreateImageRequest.builder() \
@@ -479,6 +465,7 @@ class FeishuChannel(BaseChannel):
def _upload_file_sync(self, file_path: str) -> str | None:
"""Upload a file to Feishu and return the file_key."""
+ from lark_oapi.api.im.v1 import CreateFileRequest, CreateFileRequestBody
ext = os.path.splitext(file_path)[1].lower()
file_type = self._FILE_TYPE_MAP.get(ext, "stream")
file_name = os.path.basename(file_path)
@@ -506,6 +493,7 @@ class FeishuChannel(BaseChannel):
def _download_image_sync(self, message_id: str, image_key: str) -> tuple[bytes | None, str | None]:
"""Download an image from Feishu message by message_id and image_key."""
+ from lark_oapi.api.im.v1 import GetMessageResourceRequest
try:
request = GetMessageResourceRequest.builder() \
.message_id(message_id) \
@@ -530,6 +518,7 @@ class FeishuChannel(BaseChannel):
self, message_id: str, file_key: str, resource_type: str = "file"
) -> tuple[bytes | None, str | None]:
"""Download a file/audio/media from a Feishu message by message_id and file_key."""
+ from lark_oapi.api.im.v1 import GetMessageResourceRequest
try:
request = (
GetMessageResourceRequest.builder()
@@ -598,6 +587,7 @@ class FeishuChannel(BaseChannel):
def _send_message_sync(self, receive_id_type: str, receive_id: str, msg_type: str, content: str) -> bool:
"""Send a single message (text/image/file/interactive) synchronously."""
+ from lark_oapi.api.im.v1 import CreateMessageRequest, CreateMessageRequestBody
try:
request = CreateMessageRequest.builder() \
.receive_id_type(receive_id_type) \
diff --git a/pyproject.toml b/pyproject.toml
index 0546523..d384f3f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -42,6 +42,7 @@ dependencies = [
"prompt-toolkit>=3.0.50,<4.0.0",
"mcp>=1.26.0,<2.0.0",
"json-repair>=0.57.0,<1.0.0",
+ "chardet>=3.0.2,<6.0.0",
]
[project.optional-dependencies]
From e032faaeff81d7e4fa39659badbacc7b4004dc05 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergio=20S=C3=A1nchez=20Vall=C3=A9s?=
Date: Wed, 4 Mar 2026 20:04:00 +0100
Subject: [PATCH 19/26] Merge branch 'main' of upstream/main into
fix/test-failures
---
.gitignore | 2 +-
nanobot/agent/tools/base.py | 2 ++
nanobot/agent/tools/cron.py | 5 ++++-
nanobot/agent/tools/filesystem.py | 11 +++++++++++
nanobot/channels/feishu.py | 6 ++++++
nanobot/providers/openai_codex_provider.py | 3 +++
pyproject.toml | 2 ++
tests/test_context_prompt_cache.py | 1 +
8 files changed, 30 insertions(+), 2 deletions(-)
diff --git a/.gitignore b/.gitignore
index d7b930d..742d593 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,4 +19,4 @@ __pycache__/
poetry.lock
.pytest_cache/
botpy.log
-tests/
+
diff --git a/nanobot/agent/tools/base.py b/nanobot/agent/tools/base.py
index 8dd82c7..051fc9a 100644
--- a/nanobot/agent/tools/base.py
+++ b/nanobot/agent/tools/base.py
@@ -54,6 +54,8 @@ class Tool(ABC):
def validate_params(self, params: dict[str, Any]) -> list[str]:
"""Validate tool parameters against JSON schema. Returns error list (empty if valid)."""
+ if not isinstance(params, dict):
+ return [f"parameters must be an object, got {type(params).__name__}"]
schema = self.parameters or {}
if schema.get("type", "object") != "object":
raise ValueError(f"Schema must be object type, got {schema.get('type')!r}")
diff --git a/nanobot/agent/tools/cron.py b/nanobot/agent/tools/cron.py
index 13b1e12..f8e737b 100644
--- a/nanobot/agent/tools/cron.py
+++ b/nanobot/agent/tools/cron.py
@@ -122,7 +122,10 @@ class CronTool(Tool):
elif at:
from datetime import datetime
- dt = datetime.fromisoformat(at)
+ try:
+ dt = datetime.fromisoformat(at)
+ except ValueError:
+ return f"Error: invalid ISO datetime format '{at}'. Expected format: YYYY-MM-DDTHH:MM:SS"
at_ms = int(dt.timestamp() * 1000)
schedule = CronSchedule(kind="at", at_ms=at_ms)
delete_after = True
diff --git a/nanobot/agent/tools/filesystem.py b/nanobot/agent/tools/filesystem.py
index bbdd49c..7b0b867 100644
--- a/nanobot/agent/tools/filesystem.py
+++ b/nanobot/agent/tools/filesystem.py
@@ -26,6 +26,8 @@ def _resolve_path(
class ReadFileTool(Tool):
"""Tool to read file contents."""
+ _MAX_CHARS = 128_000 # ~128 KB — prevents OOM from reading huge files into LLM context
+
def __init__(self, workspace: Path | None = None, allowed_dir: Path | None = None):
self._workspace = workspace
self._allowed_dir = allowed_dir
@@ -54,7 +56,16 @@ class ReadFileTool(Tool):
if not file_path.is_file():
return f"Error: Not a file: {path}"
+ size = file_path.stat().st_size
+ if size > self._MAX_CHARS * 4: # rough upper bound (UTF-8 chars ≤ 4 bytes)
+ return (
+ f"Error: File too large ({size:,} bytes). "
+ f"Use exec tool with head/tail/grep to read portions."
+ )
+
content = file_path.read_text(encoding="utf-8")
+ if len(content) > self._MAX_CHARS:
+ return content[: self._MAX_CHARS] + f"\n\n... (truncated — file is {len(content):,} chars, limit {self._MAX_CHARS:,})"
return content
except PermissionError as e:
return f"Error: {e}"
diff --git a/nanobot/channels/feishu.py b/nanobot/channels/feishu.py
index 7d26fa8..0cd84c3 100644
--- a/nanobot/channels/feishu.py
+++ b/nanobot/channels/feishu.py
@@ -519,6 +519,12 @@ class FeishuChannel(BaseChannel):
) -> tuple[bytes | None, str | None]:
"""Download a file/audio/media from a Feishu message by message_id and file_key."""
from lark_oapi.api.im.v1 import GetMessageResourceRequest
+
+ # Feishu API only accepts 'image' or 'file' as type parameter
+ # Convert 'audio' to 'file' for API compatibility
+ if resource_type == "audio":
+ resource_type = "file"
+
try:
request = (
GetMessageResourceRequest.builder()
diff --git a/nanobot/providers/openai_codex_provider.py b/nanobot/providers/openai_codex_provider.py
index b6afa65..d04e210 100644
--- a/nanobot/providers/openai_codex_provider.py
+++ b/nanobot/providers/openai_codex_provider.py
@@ -52,6 +52,9 @@ class OpenAICodexProvider(LLMProvider):
"parallel_tool_calls": True,
}
+ if reasoning_effort:
+ body["reasoning"] = {"effort": reasoning_effort}
+
if tools:
body["tools"] = _convert_tools(tools)
diff --git a/pyproject.toml b/pyproject.toml
index d384f3f..e5214bd 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -43,6 +43,8 @@ dependencies = [
"mcp>=1.26.0,<2.0.0",
"json-repair>=0.57.0,<1.0.0",
"chardet>=3.0.2,<6.0.0",
+ "openai>=2.8.0",
+
]
[project.optional-dependencies]
diff --git a/tests/test_context_prompt_cache.py b/tests/test_context_prompt_cache.py
index fa7f02d..d347e53 100644
--- a/tests/test_context_prompt_cache.py
+++ b/tests/test_context_prompt_cache.py
@@ -55,6 +55,7 @@ def test_runtime_context_is_separate_untrusted_user_message(tmp_path) -> None:
assert "## Current Session" not in messages[0]["content"]
assert len(messages) == 2
+
# Runtime context is now merged with user message into a single message
assert messages[-1]["role"] == "user"
user_content = messages[-1]["content"]
From 97522bfa0309931e53782ed7a4e2cfdc470853c4 Mon Sep 17 00:00:00 2001
From: coldxiangyu
Date: Thu, 5 Mar 2026 17:27:17 +0800
Subject: [PATCH 20/26] fix(feishu): isolate lark ws Client event loop from
main asyncio loop
Commit 0209ad5 moved `import lark_oapi as lark` inside the start()
method (lazy import) to suppress DeprecationWarnings. This had an
unintended side effect: the import now happens after the main asyncio
loop is already running, so lark_oapi's module-level
loop = asyncio.get_event_loop()
captures the running main loop. When the WebSocket thread then calls
loop.run_until_complete() inside Client.start(), Python raises:
RuntimeError: This event loop is already running
and the _connect/_disconnect coroutines are never awaited.
Fix: in run_ws(), create a fresh event loop with asyncio.new_event_loop(),
set it as the thread's current loop, and patch lark_oapi.ws.client.loop
to point to this dedicated loop before calling Client.start(). The loop
is closed on thread exit.
Co-authored-by: factory-droid[bot] <138933559+factory-droid[bot]@users.noreply.github.com>
---
nanobot/channels/feishu.py | 30 +++++++++++++++++++++---------
1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/nanobot/channels/feishu.py b/nanobot/channels/feishu.py
index 0cd84c3..fcb70a8 100644
--- a/nanobot/channels/feishu.py
+++ b/nanobot/channels/feishu.py
@@ -290,16 +290,28 @@ class FeishuChannel(BaseChannel):
log_level=lark.LogLevel.INFO
)
- # Start WebSocket client in a separate thread with reconnect loop
+ # Start WebSocket client in a separate thread with reconnect loop.
+ # A dedicated event loop is created for this thread so that lark_oapi's
+ # module-level `loop = asyncio.get_event_loop()` picks up an idle loop
+ # instead of the already-running main asyncio loop, which would cause
+ # "This event loop is already running" errors.
def run_ws():
- while self._running:
- try:
- self._ws_client.start()
- except Exception as e:
- logger.warning("Feishu WebSocket error: {}", e)
- if self._running:
- import time
- time.sleep(5)
+ import time
+ import lark_oapi.ws.client as _lark_ws_client
+ ws_loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(ws_loop)
+ # Patch the module-level loop used by lark's ws Client.start()
+ _lark_ws_client.loop = ws_loop
+ try:
+ while self._running:
+ try:
+ self._ws_client.start()
+ except Exception as e:
+ logger.warning("Feishu WebSocket error: {}", e)
+ if self._running:
+ time.sleep(5)
+ finally:
+ ws_loop.close()
self._ws_thread = threading.Thread(target=run_ws, daemon=True)
self._ws_thread.start()
From fb77176cfd41b50b3495ffa99cfc22bb6cbd4ed1 Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Thu, 5 Mar 2026 14:25:46 +0000
Subject: [PATCH 21/26] feat(custom-provider): keep instance-level session
affinity header for cache locality
---
nanobot/providers/custom_provider.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/nanobot/providers/custom_provider.py b/nanobot/providers/custom_provider.py
index 02183f3..66df734 100644
--- a/nanobot/providers/custom_provider.py
+++ b/nanobot/providers/custom_provider.py
@@ -16,6 +16,7 @@ class CustomProvider(LLMProvider):
def __init__(self, api_key: str = "no-key", api_base: str = "http://localhost:8000/v1", default_model: str = "default"):
super().__init__(api_key, api_base)
self.default_model = default_model
+ # Keep affinity stable for this provider instance to improve backend cache locality.
self._client = AsyncOpenAI(
api_key=api_key,
base_url=api_base,
From 06fcd2cc3fed18667672f638a6c7cc54f8d5f736 Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Thu, 5 Mar 2026 14:33:14 +0000
Subject: [PATCH 22/26] fix(discord): correct group_policy default to mention
and style cleanup
---
nanobot/channels/discord.py | 6 ++----
nanobot/config/schema.py | 2 +-
2 files changed, 3 insertions(+), 5 deletions(-)
diff --git a/nanobot/channels/discord.py b/nanobot/channels/discord.py
index 85ff28a..900c17b 100644
--- a/nanobot/channels/discord.py
+++ b/nanobot/channels/discord.py
@@ -174,7 +174,7 @@ class DiscordChannel(BaseChannel):
# Capture bot user ID for mention detection
user_data = payload.get("user") or {}
self._bot_user_id = user_data.get("id")
- logger.info(f"Discord bot connected as user {self._bot_user_id}")
+ logger.info("Discord bot connected as user {}", self._bot_user_id)
elif op == 0 and event_type == "MESSAGE_CREATE":
await self._handle_message_create(payload)
elif op == 7:
@@ -287,8 +287,6 @@ class DiscordChannel(BaseChannel):
def _should_respond_in_group(self, payload: dict[str, Any], content: str) -> bool:
"""Check if bot should respond in a group channel based on policy."""
- channel_id = str(payload.get("channel_id", ""))
-
if self.config.group_policy == "open":
return True
@@ -303,7 +301,7 @@ class DiscordChannel(BaseChannel):
# Also check content for mention format <@USER_ID>
if f"<@{self._bot_user_id}>" in content or f"<@!{self._bot_user_id}>" in content:
return True
- logger.debug(f"Discord message in {channel_id} ignored (bot not mentioned)")
+ logger.debug("Discord message in {} ignored (bot not mentioned)", payload.get("channel_id"))
return False
return True
diff --git a/nanobot/config/schema.py b/nanobot/config/schema.py
index a6b609b..9d7da3b 100644
--- a/nanobot/config/schema.py
+++ b/nanobot/config/schema.py
@@ -62,7 +62,7 @@ class DiscordConfig(Base):
allow_from: list[str] = Field(default_factory=list) # Allowed user IDs
gateway_url: str = "wss://gateway.discord.gg/?v=10&encoding=json"
intents: int = 37377 # GUILDS + GUILD_MESSAGES + DIRECT_MESSAGES + MESSAGE_CONTENT
- group_policy: str = "open" # "mention" or "open"
+ group_policy: Literal["mention", "open"] = "mention"
class MatrixConfig(Base):
From b71c1bdca7dd0aa6323d7b8074bf4be25aa44a9b Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Thu, 5 Mar 2026 14:44:45 +0000
Subject: [PATCH 23/26] fix(mcp): hoist sse/http imports, annotate
auto-detection heuristic, restore field comments
---
README.md | 4 ++--
nanobot/agent/tools/mcp.py | 6 +++---
nanobot/config/schema.py | 14 +++++++-------
3 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/README.md b/README.md
index 6c9304d..5bc70b8 100644
--- a/README.md
+++ b/README.md
@@ -14,9 +14,9 @@
🐈 **nanobot** is an **ultra-lightweight** personal AI assistant inspired by [OpenClaw](https://github.com/openclaw/openclaw)
-⚡️ Delivers core agent functionality in just **~4,000** lines of code — **99% smaller** than Clawdbot's 430k+ lines.
+⚡️ Delivers core agent functionality with **99% fewer lines of code** than OpenClaw, making it more customizable and understandable.
-📏 Real-time line count: **3,935 lines** (run `bash core_agent_lines.sh` to verify anytime)
+📏 Real-time line count: run `bash core_agent_lines.sh` to verify anytime
## 📢 News
diff --git a/nanobot/agent/tools/mcp.py b/nanobot/agent/tools/mcp.py
index 151aa55..2cbffd0 100644
--- a/nanobot/agent/tools/mcp.py
+++ b/nanobot/agent/tools/mcp.py
@@ -58,7 +58,9 @@ async def connect_mcp_servers(
) -> None:
"""Connect to configured MCP servers and register their tools."""
from mcp import ClientSession, StdioServerParameters
+ from mcp.client.sse import sse_client
from mcp.client.stdio import stdio_client
+ from mcp.client.streamable_http import streamable_http_client
for name, cfg in mcp_servers.items():
try:
@@ -67,6 +69,7 @@ async def connect_mcp_servers(
if cfg.command:
transport_type = "stdio"
elif cfg.url:
+ # Convention: URLs ending with /sse use SSE transport; others use streamableHttp
transport_type = (
"sse" if cfg.url.rstrip("/").endswith("/sse") else "streamableHttp"
)
@@ -80,8 +83,6 @@ async def connect_mcp_servers(
)
read, write = await stack.enter_async_context(stdio_client(params))
elif transport_type == "sse":
- from mcp.client.sse import sse_client
-
def httpx_client_factory(
headers: dict[str, str] | None = None,
timeout: httpx.Timeout | None = None,
@@ -99,7 +100,6 @@ async def connect_mcp_servers(
sse_client(cfg.url, httpx_client_factory=httpx_client_factory)
)
elif transport_type == "streamableHttp":
- from mcp.client.streamable_http import streamable_http_client
# Always provide an explicit httpx client so MCP HTTP transport does not
# inherit httpx's default 5s timeout and preempt the higher-level tool timeout.
http_client = await stack.enter_async_context(
diff --git a/nanobot/config/schema.py b/nanobot/config/schema.py
index 9f2e5b3..1f2f946 100644
--- a/nanobot/config/schema.py
+++ b/nanobot/config/schema.py
@@ -329,13 +329,13 @@ class ExecToolConfig(Base):
class MCPServerConfig(Base):
"""MCP server connection configuration (stdio or HTTP)."""
- type: Literal["stdio", "sse", "streamableHttp"] | None = None
- command: str = ""
- args: list[str] = Field(default_factory=list)
- env: dict[str, str] = Field(default_factory=dict)
- url: str = ""
- headers: dict[str, str] = Field(default_factory=dict)
- tool_timeout: int = 30
+ type: Literal["stdio", "sse", "streamableHttp"] | None = None # auto-detected if omitted
+ command: str = "" # Stdio: command to run (e.g. "npx")
+ args: list[str] = Field(default_factory=list) # Stdio: command arguments
+ env: dict[str, str] = Field(default_factory=dict) # Stdio: extra env vars
+ url: str = "" # HTTP/SSE: endpoint URL
+ headers: dict[str, str] = Field(default_factory=dict) # HTTP/SSE: custom headers
+ tool_timeout: int = 30 # seconds before a tool call is cancelled
class ToolsConfig(Base):
From 57d8aefc2289144339640be677d5d4e3edfdcb6f Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Thu, 5 Mar 2026 14:46:03 +0000
Subject: [PATCH 24/26] docs: update introduction of nanobot
---
README.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index 5bc70b8..4c5e9a6 100644
--- a/README.md
+++ b/README.md
@@ -12,11 +12,11 @@
-🐈 **nanobot** is an **ultra-lightweight** personal AI assistant inspired by [OpenClaw](https://github.com/openclaw/openclaw)
+🐈 **nanobot** is an **ultra-lightweight** personal AI assistant inspired by [OpenClaw](https://github.com/openclaw/openclaw).
⚡️ Delivers core agent functionality with **99% fewer lines of code** than OpenClaw, making it more customizable and understandable.
-📏 Real-time line count: run `bash core_agent_lines.sh` to verify anytime
+📏 Real-time line count: run `bash core_agent_lines.sh` to verify anytime.
## 📢 News
From cd0bcc162e5a742e452918c4835384774d7a7938 Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Thu, 5 Mar 2026 14:48:57 +0000
Subject: [PATCH 25/26] docs: update introduction of nanobot
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 4c5e9a6..1374fb8 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@
🐈 **nanobot** is an **ultra-lightweight** personal AI assistant inspired by [OpenClaw](https://github.com/openclaw/openclaw).
-⚡️ Delivers core agent functionality with **99% fewer lines of code** than OpenClaw, making it more customizable and understandable.
+⚡️ Delivers core agent functionality with **99% fewer lines of code** than OpenClaw.
📏 Real-time line count: run `bash core_agent_lines.sh` to verify anytime.
From 0343d66224007d6d7964984db7741ae710c81167 Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Thu, 5 Mar 2026 14:54:53 +0000
Subject: [PATCH 26/26] fix(gateway): remove duplicate load_config() that
overwrote custom workspace/config
---
nanobot/cli/commands.py | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/nanobot/cli/commands.py b/nanobot/cli/commands.py
index 05e2cbe..b097059 100644
--- a/nanobot/cli/commands.py
+++ b/nanobot/cli/commands.py
@@ -244,7 +244,7 @@ def _make_provider(config: Config):
@app.command()
def gateway(
port: int = typer.Option(18790, "--port", "-p", help="Gateway port"),
- workspace: str | None = typer.Option(None, "--workspace", "-w", help="Workspace directory (default: ~/.nanobot/workspace)"),
+ workspace: str | None = typer.Option(None, "--workspace", "-w", help="Workspace directory"),
config: str | None = typer.Option(None, "--config", "-c", help="Config file path"),
verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"),
):
@@ -252,7 +252,7 @@ def gateway(
from nanobot.agent.loop import AgentLoop
from nanobot.bus.queue import MessageBus
from nanobot.channels.manager import ChannelManager
- from nanobot.config.loader import get_data_dir, load_config
+ from nanobot.config.loader import load_config
from nanobot.cron.service import CronService
from nanobot.cron.types import CronJob
from nanobot.heartbeat.service import HeartbeatService
@@ -262,17 +262,12 @@ def gateway(
import logging
logging.basicConfig(level=logging.DEBUG)
- # Load config from custom path if provided, otherwise use default
config_path = Path(config) if config else None
config = load_config(config_path)
-
- # Override workspace if specified via command line
if workspace:
config.agents.defaults.workspace = workspace
console.print(f"{__logo__} Starting nanobot gateway on port {port}...")
-
- config = load_config()
sync_workspace_templates(config.workspace_path)
bus = MessageBus()
provider = _make_provider(config)