From 8571df2e634809b396b08b0967957078c15ede7f Mon Sep 17 00:00:00 2001
From: zerone0x
Date: Sun, 1 Mar 2026 15:13:44 +0100
Subject: [PATCH 01/28] fix(feishu): split card messages when content has
multiple tables
Feishu rejects interactive cards that contain more than one table element
(API error 11310: card table number over limit).
Add FeishuChannel._split_elements_by_table_limit() which partitions the flat
card-elements list into groups of at most one table each. The send() method
now iterates over these groups and sends each as its own card message, so all
tables are delivered to the user instead of the entire message being dropped.
Single-table and table-free messages are unaffected (one card, same as before).
Fixes #1382
---
nanobot/channels/feishu.py | 40 ++++++++++--
tests/test_feishu_table_split.py | 104 +++++++++++++++++++++++++++++++
2 files changed, 139 insertions(+), 5 deletions(-)
create mode 100644 tests/test_feishu_table_split.py
diff --git a/nanobot/channels/feishu.py b/nanobot/channels/feishu.py
index 0a0a5e4..9ab1d50 100644
--- a/nanobot/channels/feishu.py
+++ b/nanobot/channels/feishu.py
@@ -413,6 +413,34 @@ class FeishuChannel(BaseChannel):
elements.extend(self._split_headings(remaining))
return elements or [{"tag": "markdown", "content": content}]
+ @staticmethod
+ def _split_elements_by_table_limit(elements: list[dict], max_tables: int = 1) -> list[list[dict]]:
+ """Split card elements into groups with at most *max_tables* table elements each.
+
+ Feishu cards have a hard limit of one table per card (API error 11310).
+ When the rendered content contains multiple markdown tables each table is
+ placed in a separate card message so every table reaches the user.
+ """
+ if not elements:
+ return [[]]
+ groups: list[list[dict]] = []
+ current: list[dict] = []
+ table_count = 0
+ for el in elements:
+ if el.get("tag") == "table":
+ if table_count >= max_tables:
+ if current:
+ groups.append(current)
+ current = []
+ table_count = 0
+ current.append(el)
+ table_count += 1
+ else:
+ current.append(el)
+ if current:
+ groups.append(current)
+ return groups or [[]]
+
def _split_headings(self, content: str) -> list[dict]:
"""Split content by headings, converting headings to div elements."""
protected = content
@@ -653,11 +681,13 @@ class FeishuChannel(BaseChannel):
)
if msg.content and msg.content.strip():
- card = {"config": {"wide_screen_mode": True}, "elements": self._build_card_elements(msg.content)}
- await loop.run_in_executor(
- None, self._send_message_sync,
- receive_id_type, msg.chat_id, "interactive", json.dumps(card, ensure_ascii=False),
- )
+ elements = self._build_card_elements(msg.content)
+ for chunk in self._split_elements_by_table_limit(elements):
+ card = {"config": {"wide_screen_mode": True}, "elements": chunk}
+ await loop.run_in_executor(
+ None, self._send_message_sync,
+ receive_id_type, msg.chat_id, "interactive", json.dumps(card, ensure_ascii=False),
+ )
except Exception as e:
logger.error("Error sending Feishu message: {}", e)
diff --git a/tests/test_feishu_table_split.py b/tests/test_feishu_table_split.py
new file mode 100644
index 0000000..af8fa16
--- /dev/null
+++ b/tests/test_feishu_table_split.py
@@ -0,0 +1,104 @@
+"""Tests for FeishuChannel._split_elements_by_table_limit.
+
+Feishu cards reject messages that contain more than one table element
+(API error 11310: card table number over limit). The helper splits a flat
+list of card elements into groups so that each group contains at most one
+table, allowing nanobot to send multiple cards instead of failing.
+"""
+
+from nanobot.channels.feishu import FeishuChannel
+
+
+def _md(text: str) -> dict:
+ return {"tag": "markdown", "content": text}
+
+
+def _table() -> dict:
+ return {
+ "tag": "table",
+ "columns": [{"tag": "column", "name": "c0", "display_name": "A", "width": "auto"}],
+ "rows": [{"c0": "v"}],
+ "page_size": 2,
+ }
+
+
+split = FeishuChannel._split_elements_by_table_limit
+
+
+def test_empty_list_returns_single_empty_group() -> None:
+ assert split([]) == [[]]
+
+
+def test_no_tables_returns_single_group() -> None:
+ els = [_md("hello"), _md("world")]
+ result = split(els)
+ assert result == [els]
+
+
+def test_single_table_stays_in_one_group() -> None:
+ els = [_md("intro"), _table(), _md("outro")]
+ result = split(els)
+ assert len(result) == 1
+ assert result[0] == els
+
+
+def test_two_tables_split_into_two_groups() -> None:
+ # Use different row values so the two tables are not equal
+ t1 = {
+ "tag": "table",
+ "columns": [{"tag": "column", "name": "c0", "display_name": "A", "width": "auto"}],
+ "rows": [{"c0": "table-one"}],
+ "page_size": 2,
+ }
+ t2 = {
+ "tag": "table",
+ "columns": [{"tag": "column", "name": "c0", "display_name": "B", "width": "auto"}],
+ "rows": [{"c0": "table-two"}],
+ "page_size": 2,
+ }
+ els = [_md("before"), t1, _md("between"), t2, _md("after")]
+ result = split(els)
+ assert len(result) == 2
+ # First group: text before table-1 + table-1
+ assert t1 in result[0]
+ assert t2 not in result[0]
+ # Second group: text between tables + table-2 + text after
+ assert t2 in result[1]
+ assert t1 not in result[1]
+
+
+def test_three_tables_split_into_three_groups() -> None:
+ tables = [
+ {"tag": "table", "columns": [], "rows": [{"c0": f"t{i}"}], "page_size": 1}
+ for i in range(3)
+ ]
+ els = tables[:]
+ result = split(els)
+ assert len(result) == 3
+ for i, group in enumerate(result):
+ assert tables[i] in group
+
+
+def test_leading_markdown_stays_with_first_table() -> None:
+ intro = _md("intro")
+ t = _table()
+ result = split([intro, t])
+ assert len(result) == 1
+ assert result[0] == [intro, t]
+
+
+def test_trailing_markdown_after_second_table() -> None:
+ t1, t2 = _table(), _table()
+ tail = _md("end")
+ result = split([t1, t2, tail])
+ assert len(result) == 2
+ assert result[1] == [t2, tail]
+
+
+def test_non_table_elements_before_first_table_kept_in_first_group() -> None:
+ head = _md("head")
+ t1, t2 = _table(), _table()
+ result = split([head, t1, t2])
+ # head + t1 in group 0; t2 in group 1
+ assert result[0] == [head, t1]
+ assert result[1] == [t2]
From ae788a17f8371de0d60b7b9d713bdb8261fa6cd2 Mon Sep 17 00:00:00 2001
From: chengyongru <2755839590@qq.com>
Date: Mon, 2 Mar 2026 11:03:54 +0800
Subject: [PATCH 02/28] chore: add .worktrees to .gitignore
Co-Authored-By: Claude Opus 4.6
---
.gitignore | 1 +
1 file changed, 1 insertion(+)
diff --git a/.gitignore b/.gitignore
index d7b930d..a543251 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+.worktrees/
.assets
.env
*.pyc
From aed1ef55298433a963474d8fbdcf0b203945ffb5 Mon Sep 17 00:00:00 2001
From: chengyongru <2755839590@qq.com>
Date: Mon, 2 Mar 2026 11:04:53 +0800
Subject: [PATCH 03/28] fix: add SIGTERM, SIGHUP handling and ignore SIGPIPE
- Add handler for SIGTERM to prevent "Terminated" message on Linux
- Add handler for SIGHUP for terminal closure handling
- Ignore SIGPIPE to prevent silent process termination
- Change os._exit(0) to sys.exit(0) for proper cleanup
Fixes issue #1365
Co-Authored-By: Claude Opus 4.6
---
nanobot/cli/commands.py | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/nanobot/cli/commands.py b/nanobot/cli/commands.py
index 2662e9f..8c53992 100644
--- a/nanobot/cli/commands.py
+++ b/nanobot/cli/commands.py
@@ -501,12 +501,17 @@ def agent(
else:
cli_channel, cli_chat_id = "cli", session_id
- def _exit_on_sigint(signum, frame):
+ def _handle_signal(signum, frame):
+ sig_name = signal.Signals(signum).name
_restore_terminal()
- console.print("\nGoodbye!")
- os._exit(0)
+ console.print(f"\nReceived {sig_name}, goodbye!")
+ sys.exit(0)
- signal.signal(signal.SIGINT, _exit_on_sigint)
+ signal.signal(signal.SIGINT, _handle_signal)
+ signal.signal(signal.SIGTERM, _handle_signal)
+ signal.signal(signal.SIGHUP, _handle_signal)
+ # Ignore SIGPIPE to prevent silent process termination when writing to closed pipes
+ signal.signal(signal.SIGPIPE, signal.SIG_IGN)
async def run_interactive():
bus_task = asyncio.create_task(agent_loop.run())
From e9d023f52cbc7fb8eab37ab5aa4a501b0b5bdc81 Mon Sep 17 00:00:00 2001
From: Joel Chan
Date: Thu, 12 Feb 2026 17:10:50 +0800
Subject: [PATCH 04/28] feat(discord): add group policy to control group
response behaviour
---
README.md | 8 +++++++-
nanobot/channels/discord.py | 36 +++++++++++++++++++++++++++++++++++-
nanobot/config/schema.py | 1 +
3 files changed, 43 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index 45779e7..f141a1c 100644
--- a/README.md
+++ b/README.md
@@ -293,12 +293,18 @@ If you prefer to configure manually, add the following to `~/.nanobot/config.jso
"discord": {
"enabled": true,
"token": "YOUR_BOT_TOKEN",
- "allowFrom": ["YOUR_USER_ID"]
+ "allowFrom": ["YOUR_USER_ID"],
+ "groupPolicy": "mention"
}
}
}
```
+> `groupPolicy` controls how the bot responds in group channels:
+> - `"mention"` — Only respond when @mentioned
+> - `"open"` (default) — Respond to all messages
+> DMs always respond when the sender is in `allowFrom`.
+
**5. Invite the bot**
- OAuth2 → URL Generator
- Scopes: `bot`
diff --git a/nanobot/channels/discord.py b/nanobot/channels/discord.py
index 57e5922..85ff28a 100644
--- a/nanobot/channels/discord.py
+++ b/nanobot/channels/discord.py
@@ -54,6 +54,7 @@ class DiscordChannel(BaseChannel):
self._heartbeat_task: asyncio.Task | None = None
self._typing_tasks: dict[str, asyncio.Task] = {}
self._http: httpx.AsyncClient | None = None
+ self._bot_user_id: str | None = None
async def start(self) -> None:
"""Start the Discord gateway connection."""
@@ -170,6 +171,10 @@ class DiscordChannel(BaseChannel):
await self._identify()
elif op == 0 and event_type == "READY":
logger.info("Discord gateway READY")
+ # Capture bot user ID for mention detection
+ user_data = payload.get("user") or {}
+ self._bot_user_id = user_data.get("id")
+ logger.info(f"Discord bot connected as user {self._bot_user_id}")
elif op == 0 and event_type == "MESSAGE_CREATE":
await self._handle_message_create(payload)
elif op == 7:
@@ -226,6 +231,7 @@ class DiscordChannel(BaseChannel):
sender_id = str(author.get("id", ""))
channel_id = str(payload.get("channel_id", ""))
content = payload.get("content") or ""
+ guild_id = payload.get("guild_id")
if not sender_id or not channel_id:
return
@@ -233,6 +239,11 @@ class DiscordChannel(BaseChannel):
if not self.is_allowed(sender_id):
return
+ # Check group channel policy (DMs always respond if is_allowed passes)
+ if guild_id is not None:
+ if not self._should_respond_in_group(payload, content):
+ return
+
content_parts = [content] if content else []
media_paths: list[str] = []
media_dir = Path.home() / ".nanobot" / "media"
@@ -269,11 +280,34 @@ class DiscordChannel(BaseChannel):
media=media_paths,
metadata={
"message_id": str(payload.get("id", "")),
- "guild_id": payload.get("guild_id"),
+ "guild_id": guild_id,
"reply_to": reply_to,
},
)
+ def _should_respond_in_group(self, payload: dict[str, Any], content: str) -> bool:
+ """Check if bot should respond in a group channel based on policy."""
+ channel_id = str(payload.get("channel_id", ""))
+
+ if self.config.group_policy == "open":
+ return True
+
+ if self.config.group_policy == "mention":
+ # Check if bot was mentioned in the message
+ if self._bot_user_id:
+ # Check mentions array
+ mentions = payload.get("mentions") or []
+ for mention in mentions:
+ if str(mention.get("id")) == self._bot_user_id:
+ return True
+ # Also check content for mention format <@USER_ID>
+ if f"<@{self._bot_user_id}>" in content or f"<@!{self._bot_user_id}>" in content:
+ return True
+ logger.debug(f"Discord message in {channel_id} ignored (bot not mentioned)")
+ return False
+
+ return True
+
async def _start_typing(self, channel_id: str) -> None:
"""Start periodic typing indicator for a channel."""
await self._stop_typing(channel_id)
diff --git a/nanobot/config/schema.py b/nanobot/config/schema.py
index 6b80c81..e3d3d23 100644
--- a/nanobot/config/schema.py
+++ b/nanobot/config/schema.py
@@ -62,6 +62,7 @@ class DiscordConfig(Base):
allow_from: list[str] = Field(default_factory=list) # Allowed user IDs
gateway_url: str = "wss://gateway.discord.gg/?v=10&encoding=json"
intents: int = 37377 # GUILDS + GUILD_MESSAGES + DIRECT_MESSAGES + MESSAGE_CONTENT
+ group_policy: str = "open" # "mention" or "open"
class MatrixConfig(Base):
From ecdfaf0a5a00b0772719aa306d4cb36d8512f9c7 Mon Sep 17 00:00:00 2001
From: David Markey
Date: Sun, 1 Mar 2026 20:49:00 +0000
Subject: [PATCH 05/28] feat(custom-provider): add x-session-affinity header
for prompt caching
---
nanobot/providers/custom_provider.py | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/nanobot/providers/custom_provider.py b/nanobot/providers/custom_provider.py
index 56e6270..02183f3 100644
--- a/nanobot/providers/custom_provider.py
+++ b/nanobot/providers/custom_provider.py
@@ -2,6 +2,7 @@
from __future__ import annotations
+import uuid
from typing import Any
import json_repair
@@ -15,7 +16,11 @@ class CustomProvider(LLMProvider):
def __init__(self, api_key: str = "no-key", api_base: str = "http://localhost:8000/v1", default_model: str = "default"):
super().__init__(api_key, api_base)
self.default_model = default_model
- self._client = AsyncOpenAI(api_key=api_key, base_url=api_base)
+ self._client = AsyncOpenAI(
+ api_key=api_key,
+ base_url=api_base,
+ default_headers={"x-session-affinity": uuid.uuid4().hex},
+ )
async def chat(self, messages: list[dict[str, Any]], tools: list[dict[str, Any]] | None = None,
model: str | None = None, max_tokens: int = 4096, temperature: float = 0.7,
From 8f4baaa5ce750fc073921fa29e734a0fe0da2056 Mon Sep 17 00:00:00 2001
From: chengyongru <2755839590@qq.com>
Date: Mon, 2 Mar 2026 22:01:02 +0800
Subject: [PATCH 06/28] feat(gateway): support multiple instances with
--workspace and --config options
- Add --workspace/-w flag to specify workspace directory
- Add --config/-c flag to specify config file path
- Move cron store to workspace directory for per-instance isolation
- Enable running multiple nanobot instances simultaneously
---
README.md | 27 +++++++++++++++++++++++++++
nanobot/cli/commands.py | 13 ++++++++++++-
2 files changed, 39 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 01da228..3022708 100644
--- a/README.md
+++ b/README.md
@@ -884,6 +884,33 @@ MCP tools are automatically discovered and registered on startup. The LLM can us
| `channels.*.allowFrom` | `[]` (allow all) | Whitelist of user IDs. Empty = allow everyone; non-empty = only listed users can interact. |
+## Multiple Instances
+
+Run multiple nanobot instances simultaneously, each with its own workspace and configuration.
+
+```bash
+# Instance A - Telegram bot
+nanobot gateway -w ~/.nanobot/botA -p 18791
+
+# Instance B - Discord bot
+nanobot gateway -w ~/.nanobot/botB -p 18792
+
+# Instance C - Using custom config file
+nanobot gateway -w ~/.nanobot/botC -c ~/.nanobot/botC/config.json -p 18793
+```
+
+| Option | Short | Description |
+|--------|-------|-------------|
+| `--workspace` | `-w` | Workspace directory (default: `~/.nanobot/workspace`) |
+| `--config` | `-c` | Config file path (default: `~/.nanobot/config.json`) |
+| `--port` | `-p` | Gateway port (default: `18790`) |
+
+Each instance has its own:
+- Workspace directory (MEMORY.md, HEARTBEAT.md, session files)
+- Cron jobs storage (`workspace/cron/jobs.json`)
+- Configuration (if using `--config`)
+
+
## CLI Reference
| Command | Description |
diff --git a/nanobot/cli/commands.py b/nanobot/cli/commands.py
index 2662e9f..e599b11 100644
--- a/nanobot/cli/commands.py
+++ b/nanobot/cli/commands.py
@@ -244,6 +244,8 @@ def _make_provider(config: Config):
@app.command()
def gateway(
port: int = typer.Option(18790, "--port", "-p", help="Gateway port"),
+ workspace: str | None = typer.Option(None, "--workspace", "-w", help="Workspace directory (default: ~/.nanobot/workspace)"),
+ config: str | None = typer.Option(None, "--config", "-c", help="Config file path"),
verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"),
):
"""Start the nanobot gateway."""
@@ -260,6 +262,14 @@ def gateway(
import logging
logging.basicConfig(level=logging.DEBUG)
+ # Load config from custom path if provided, otherwise use default
+ config_path = Path(config) if config else None
+ config = load_config(config_path)
+
+ # Override workspace if specified via command line
+ if workspace:
+ config.agents.defaults.workspace = workspace
+
console.print(f"{__logo__} Starting nanobot gateway on port {port}...")
config = load_config()
@@ -269,7 +279,8 @@ def gateway(
session_manager = SessionManager(config.workspace_path)
# Create cron service first (callback set after agent creation)
- cron_store_path = get_data_dir() / "cron" / "jobs.json"
+ # Use workspace path for per-instance cron store
+ cron_store_path = config.workspace_path / "cron" / "jobs.json"
cron = CronService(cron_store_path)
# Create agent with cron service
From 102b9716ed154782a7d17be720e0a4a888889156 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergio=20S=C3=A1nchez=20Vall=C3=A9s?=
Date: Tue, 3 Mar 2026 17:16:08 +0100
Subject: [PATCH 07/28] feat: Implement Telegram draft/progress messages
(streaming)
---
nanobot/channels/telegram.py | 38 ++++++++++++++++++++++++++----------
pyproject.toml | 5 ++++-
2 files changed, 32 insertions(+), 11 deletions(-)
diff --git a/nanobot/channels/telegram.py b/nanobot/channels/telegram.py
index c290535..5f739e5 100644
--- a/nanobot/channels/telegram.py
+++ b/nanobot/channels/telegram.py
@@ -269,23 +269,41 @@ class TelegramChannel(BaseChannel):
# Send text content
if msg.content and msg.content != "[empty message]":
+ is_progress = msg.metadata.get("_progress", False)
+ draft_id = msg.metadata.get("message_id")
+
for chunk in _split_message(msg.content):
try:
html = _markdown_to_telegram_html(chunk)
- await self._app.bot.send_message(
- chat_id=chat_id,
- text=html,
- parse_mode="HTML",
- reply_parameters=reply_params
- )
- except Exception as e:
- logger.warning("HTML parse failed, falling back to plain text: {}", e)
- try:
+ if is_progress and draft_id:
+ await self._app.bot.send_message_draft(
+ chat_id=chat_id,
+ draft_id=draft_id,
+ text=html,
+ parse_mode="HTML"
+ )
+ else:
await self._app.bot.send_message(
chat_id=chat_id,
- text=chunk,
+ text=html,
+ parse_mode="HTML",
reply_parameters=reply_params
)
+ except Exception as e:
+ logger.warning("HTML parse failed (or draft send failed), falling back to plain text: {}", e)
+ try:
+ if is_progress and draft_id:
+ await self._app.bot.send_message_draft(
+ chat_id=chat_id,
+ draft_id=draft_id,
+ text=chunk
+ )
+ else:
+ await self._app.bot.send_message(
+ chat_id=chat_id,
+ text=chunk,
+ reply_parameters=reply_params
+ )
except Exception as e2:
logger.error("Error sending Telegram message: {}", e2)
diff --git a/pyproject.toml b/pyproject.toml
index a22053c..42f6194 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -30,7 +30,7 @@ dependencies = [
"rich>=14.0.0,<15.0.0",
"croniter>=6.0.0,<7.0.0",
"dingtalk-stream>=0.24.0,<1.0.0",
- "python-telegram-bot[socks]>=22.0,<23.0",
+ "python-telegram-bot[socks] @ git+https://github.com/python-telegram-bot/python-telegram-bot.git@master",
"lark-oapi>=1.5.0,<2.0.0",
"socksio>=1.0.0,<2.0.0",
"python-socketio>=5.16.0,<6.0.0",
@@ -63,6 +63,9 @@ nanobot = "nanobot.cli.commands:app"
requires = ["hatchling"]
build-backend = "hatchling.build"
+[tool.hatch.metadata]
+allow-direct-references = true
+
[tool.hatch.build.targets.wheel]
packages = ["nanobot"]
From 3e83425142334c6d712c210ac73254488f749150 Mon Sep 17 00:00:00 2001
From: worenidewen
Date: Wed, 4 Mar 2026 01:06:04 +0800
Subject: [PATCH 08/28] feat(mcp): add SSE transport support with
auto-detection
---
nanobot/agent/tools/mcp.py | 37 ++++++++++++++++++--
nanobot/config/schema.py | 72 ++++++++++++++++++++++++++------------
2 files changed, 83 insertions(+), 26 deletions(-)
diff --git a/nanobot/agent/tools/mcp.py b/nanobot/agent/tools/mcp.py
index 37464e1..151aa55 100644
--- a/nanobot/agent/tools/mcp.py
+++ b/nanobot/agent/tools/mcp.py
@@ -62,12 +62,43 @@ async def connect_mcp_servers(
for name, cfg in mcp_servers.items():
try:
- if cfg.command:
+ transport_type = cfg.type
+ if not transport_type:
+ if cfg.command:
+ transport_type = "stdio"
+ elif cfg.url:
+ transport_type = (
+ "sse" if cfg.url.rstrip("/").endswith("/sse") else "streamableHttp"
+ )
+ else:
+ logger.warning("MCP server '{}': no command or url configured, skipping", name)
+ continue
+
+ if transport_type == "stdio":
params = StdioServerParameters(
command=cfg.command, args=cfg.args, env=cfg.env or None
)
read, write = await stack.enter_async_context(stdio_client(params))
- elif cfg.url:
+ elif transport_type == "sse":
+ from mcp.client.sse import sse_client
+
+ def httpx_client_factory(
+ headers: dict[str, str] | None = None,
+ timeout: httpx.Timeout | None = None,
+ auth: httpx.Auth | None = None,
+ ) -> httpx.AsyncClient:
+ merged_headers = {**(cfg.headers or {}), **(headers or {})}
+ return httpx.AsyncClient(
+ headers=merged_headers or None,
+ follow_redirects=True,
+ timeout=timeout,
+ auth=auth,
+ )
+
+ read, write = await stack.enter_async_context(
+ sse_client(cfg.url, httpx_client_factory=httpx_client_factory)
+ )
+ elif transport_type == "streamableHttp":
from mcp.client.streamable_http import streamable_http_client
# Always provide an explicit httpx client so MCP HTTP transport does not
# inherit httpx's default 5s timeout and preempt the higher-level tool timeout.
@@ -82,7 +113,7 @@ async def connect_mcp_servers(
streamable_http_client(cfg.url, http_client=http_client)
)
else:
- logger.warning("MCP server '{}': no command or url configured, skipping", name)
+ logger.warning("MCP server '{}': unknown transport type '{}'", name, transport_type)
continue
session = await stack.enter_async_context(ClientSession(read, write))
diff --git a/nanobot/config/schema.py b/nanobot/config/schema.py
index 61a7bd2..64e60dc 100644
--- a/nanobot/config/schema.py
+++ b/nanobot/config/schema.py
@@ -29,7 +29,9 @@ class TelegramConfig(Base):
enabled: bool = False
token: str = "" # Bot token from @BotFather
allow_from: list[str] = Field(default_factory=list) # Allowed user IDs or usernames
- proxy: str | None = None # HTTP/SOCKS5 proxy URL, e.g. "http://127.0.0.1:7890" or "socks5://127.0.0.1:1080"
+ proxy: str | None = (
+ None # HTTP/SOCKS5 proxy URL, e.g. "http://127.0.0.1:7890" or "socks5://127.0.0.1:1080"
+ )
reply_to_message: bool = False # If true, bot replies quote the original message
@@ -42,7 +44,9 @@ class FeishuConfig(Base):
encrypt_key: str = "" # Encrypt Key for event subscription (optional)
verification_token: str = "" # Verification Token for event subscription (optional)
allow_from: list[str] = Field(default_factory=list) # Allowed user open_ids
- react_emoji: str = "THUMBSUP" # Emoji type for message reactions (e.g. THUMBSUP, OK, DONE, SMILE)
+ react_emoji: str = (
+ "THUMBSUP" # Emoji type for message reactions (e.g. THUMBSUP, OK, DONE, SMILE)
+ )
class DingTalkConfig(Base):
@@ -72,9 +76,13 @@ class MatrixConfig(Base):
access_token: str = ""
user_id: str = "" # @bot:matrix.org
device_id: str = ""
- e2ee_enabled: bool = True # Enable Matrix E2EE support (encryption + encrypted room handling).
- sync_stop_grace_seconds: int = 2 # Max seconds to wait for sync_forever to stop gracefully before cancellation fallback.
- max_media_bytes: int = 20 * 1024 * 1024 # Max attachment size accepted for Matrix media handling (inbound + outbound).
+ e2ee_enabled: bool = True # Enable Matrix E2EE support (encryption + encrypted room handling).
+ sync_stop_grace_seconds: int = (
+ 2 # Max seconds to wait for sync_forever to stop gracefully before cancellation fallback.
+ )
+ max_media_bytes: int = (
+ 20 * 1024 * 1024
+ ) # Max attachment size accepted for Matrix media handling (inbound + outbound).
allow_from: list[str] = Field(default_factory=list)
group_policy: Literal["open", "mention", "allowlist"] = "open"
group_allow_from: list[str] = Field(default_factory=list)
@@ -105,7 +113,9 @@ class EmailConfig(Base):
from_address: str = ""
# Behavior
- auto_reply_enabled: bool = True # If false, inbound email is read but no automatic reply is sent
+ auto_reply_enabled: bool = (
+ True # If false, inbound email is read but no automatic reply is sent
+ )
poll_interval_seconds: int = 30
mark_seen: bool = True
max_body_chars: int = 12000
@@ -183,27 +193,32 @@ class QQConfig(Base):
enabled: bool = False
app_id: str = "" # 机器人 ID (AppID) from q.qq.com
secret: str = "" # 机器人密钥 (AppSecret) from q.qq.com
- allow_from: list[str] = Field(default_factory=list) # Allowed user openids (empty = public access)
+ allow_from: list[str] = Field(
+ default_factory=list
+ ) # Allowed user openids (empty = public access)
+
class MatrixConfig(Base):
"""Matrix (Element) channel configuration."""
+
enabled: bool = False
homeserver: str = "https://matrix.org"
access_token: str = ""
- user_id: str = "" # e.g. @bot:matrix.org
+ user_id: str = "" # e.g. @bot:matrix.org
device_id: str = ""
- e2ee_enabled: bool = True # end-to-end encryption support
- sync_stop_grace_seconds: int = 2 # graceful sync_forever shutdown timeout
- max_media_bytes: int = 20 * 1024 * 1024 # inbound + outbound attachment limit
+ e2ee_enabled: bool = True # end-to-end encryption support
+ sync_stop_grace_seconds: int = 2 # graceful sync_forever shutdown timeout
+ max_media_bytes: int = 20 * 1024 * 1024 # inbound + outbound attachment limit
allow_from: list[str] = Field(default_factory=list)
group_policy: Literal["open", "mention", "allowlist"] = "open"
group_allow_from: list[str] = Field(default_factory=list)
allow_room_mentions: bool = False
+
class ChannelsConfig(Base):
"""Configuration for chat channels."""
- send_progress: bool = True # stream agent's text progress to the channel
+ send_progress: bool = True # stream agent's text progress to the channel
send_tool_hints: bool = False # stream tool-call hints (e.g. read_file("…"))
whatsapp: WhatsAppConfig = Field(default_factory=WhatsAppConfig)
telegram: TelegramConfig = Field(default_factory=TelegramConfig)
@@ -222,7 +237,9 @@ class AgentDefaults(Base):
workspace: str = "~/.nanobot/workspace"
model: str = "anthropic/claude-opus-4-5"
- provider: str = "auto" # Provider name (e.g. "anthropic", "openrouter") or "auto" for auto-detection
+ provider: str = (
+ "auto" # Provider name (e.g. "anthropic", "openrouter") or "auto" for auto-detection
+ )
max_tokens: int = 8192
temperature: float = 0.1
max_tool_iterations: int = 40
@@ -260,8 +277,12 @@ class ProvidersConfig(Base):
moonshot: ProviderConfig = Field(default_factory=ProviderConfig)
minimax: ProviderConfig = Field(default_factory=ProviderConfig)
aihubmix: ProviderConfig = Field(default_factory=ProviderConfig) # AiHubMix API gateway
- siliconflow: ProviderConfig = Field(default_factory=ProviderConfig) # SiliconFlow (硅基流动) API gateway
- volcengine: ProviderConfig = Field(default_factory=ProviderConfig) # VolcEngine (火山引擎) API gateway
+ siliconflow: ProviderConfig = Field(
+ default_factory=ProviderConfig
+ ) # SiliconFlow (硅基流动) API gateway
+ volcengine: ProviderConfig = Field(
+ default_factory=ProviderConfig
+ ) # VolcEngine (火山引擎) API gateway
openai_codex: ProviderConfig = Field(default_factory=ProviderConfig) # OpenAI Codex (OAuth)
github_copilot: ProviderConfig = Field(default_factory=ProviderConfig) # Github Copilot (OAuth)
@@ -291,7 +312,9 @@ class WebSearchConfig(Base):
class WebToolsConfig(Base):
"""Web tools configuration."""
- proxy: str | None = None # HTTP/SOCKS5 proxy URL, e.g. "http://127.0.0.1:7890" or "socks5://127.0.0.1:1080"
+ proxy: str | None = (
+ None # HTTP/SOCKS5 proxy URL, e.g. "http://127.0.0.1:7890" or "socks5://127.0.0.1:1080"
+ )
search: WebSearchConfig = Field(default_factory=WebSearchConfig)
@@ -305,12 +328,13 @@ class ExecToolConfig(Base):
class MCPServerConfig(Base):
"""MCP server connection configuration (stdio or HTTP)."""
- command: str = "" # Stdio: command to run (e.g. "npx")
- args: list[str] = Field(default_factory=list) # Stdio: command arguments
- env: dict[str, str] = Field(default_factory=dict) # Stdio: extra env vars
- url: str = "" # HTTP: streamable HTTP endpoint URL
- headers: dict[str, str] = Field(default_factory=dict) # HTTP: Custom HTTP Headers
- tool_timeout: int = 30 # Seconds before a tool call is cancelled
+ type: Literal["stdio", "sse", "streamableHttp"] | None = None
+ command: str = ""
+ args: list[str] = Field(default_factory=list)
+ env: dict[str, str] = Field(default_factory=dict)
+ url: str = ""
+ headers: dict[str, str] = Field(default_factory=dict)
+ tool_timeout: int = 30
class ToolsConfig(Base):
@@ -336,7 +360,9 @@ class Config(BaseSettings):
"""Get expanded workspace path."""
return Path(self.agents.defaults.workspace).expanduser()
- def _match_provider(self, model: str | None = None) -> tuple["ProviderConfig | None", str | None]:
+ def _match_provider(
+ self, model: str | None = None
+ ) -> tuple["ProviderConfig | None", str | None]:
"""Match provider config and its registry name. Returns (config, spec_name)."""
from nanobot.providers.registry import PROVIDERS
From 61f658e04519ea7e711e6be707765bfd8ee9257d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergio=20S=C3=A1nchez=20Vall=C3=A9s?=
Date: Wed, 4 Mar 2026 12:11:18 +0100
Subject: [PATCH 09/28] feat(agent): add reasoning content to on_progress messages
---
nanobot/agent/loop.py | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/nanobot/agent/loop.py b/nanobot/agent/loop.py
index 65a62e5..5eea6e6 100644
--- a/nanobot/agent/loop.py
+++ b/nanobot/agent/loop.py
@@ -202,9 +202,16 @@ class AgentLoop:
if response.has_tool_calls:
if on_progress:
- clean = self._strip_think(response.content)
- if clean:
- await on_progress(clean)
+ thoughts = [
+ self._strip_think(response.content),
+ response.reasoning_content,
+ *(f"Thinking [{b.get('signature', '...')}]:\n{b.get('thought', '...')}"
+ for b in (response.thinking_blocks or []) if isinstance(b, dict) and "signature" in b)
+ ]
+
+ if combined := "\n\n".join(filter(None, thoughts)):
+ await on_progress(combined)
+
await on_progress(self._tool_hint(response.tool_calls), tool_hint=True)
tool_call_dicts = [
From ca1f41562c11aadb1e9db9bdaace83cd684db31d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergio=20S=C3=A1nchez=20Vall=C3=A9s?=
Date: Wed, 4 Mar 2026 13:19:35 +0100
Subject: [PATCH 10/28] fix(telegram): only stop typing indicator for final messages
---
nanobot/channels/telegram.py | 4 +++-
pyproject.toml | 2 +-
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/nanobot/channels/telegram.py b/nanobot/channels/telegram.py
index 5f739e5..de95a15 100644
--- a/nanobot/channels/telegram.py
+++ b/nanobot/channels/telegram.py
@@ -225,7 +225,9 @@ class TelegramChannel(BaseChannel):
logger.warning("Telegram bot not running")
return
- self._stop_typing(msg.chat_id)
+ # Only stop typing indicator for final responses
+ if not msg.metadata.get("_progress", False):
+ self._stop_typing(msg.chat_id)
try:
chat_id = int(msg.chat_id)
diff --git a/pyproject.toml b/pyproject.toml
index 42f6194..7ffe8f5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -30,7 +30,7 @@ dependencies = [
"rich>=14.0.0,<15.0.0",
"croniter>=6.0.0,<7.0.0",
"dingtalk-stream>=0.24.0,<1.0.0",
- "python-telegram-bot[socks] @ git+https://github.com/python-telegram-bot/python-telegram-bot.git@master",
+ "python-telegram-bot[socks]>=22.0,<23.0",
"lark-oapi>=1.5.0,<2.0.0",
"socksio>=1.0.0,<2.0.0",
"python-socketio>=5.16.0,<6.0.0",
From c27d2b15220b2cff00604c4143851b989792fedf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergio=20S=C3=A1nchez=20Vall=C3=A9s?=
Date: Thu, 5 Mar 2026 00:33:27 +0100
Subject: [PATCH 11/28] fix(agent): prevent tool hints from overwriting
reasoning in streaming drafts
---
nanobot/agent/loop.py | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/nanobot/agent/loop.py b/nanobot/agent/loop.py
index 5eea6e6..fc1fd75 100644
--- a/nanobot/agent/loop.py
+++ b/nanobot/agent/loop.py
@@ -209,10 +209,13 @@ class AgentLoop:
for b in (response.thinking_blocks or []) if isinstance(b, dict) and "signature" in b)
]
- if combined := "\n\n".join(filter(None, thoughts)):
- await on_progress(combined)
-
- await on_progress(self._tool_hint(response.tool_calls), tool_hint=True)
+ combined_thoughts = "\n\n".join(filter(None, thoughts))
+ tool_hint_str = self._tool_hint(response.tool_calls)
+
+ if combined_thoughts:
+ await on_progress(f"{combined_thoughts}\n\n{tool_hint_str}", tool_hint=True)
+ else:
+ await on_progress(tool_hint_str, tool_hint=True)
tool_call_dicts = [
{
From 33f59d8a37a963f5fa694435155f42621d9852ff Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergio=20S=C3=A1nchez=20Vall=C3=A9s?=
Date: Thu, 5 Mar 2026 00:45:15 +0100
Subject: [PATCH 12/28] fix(agent): separate reasoning and tool hints to
respect channel config
---
nanobot/agent/loop.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/nanobot/agent/loop.py b/nanobot/agent/loop.py
index fc1fd75..2f6a2bc 100644
--- a/nanobot/agent/loop.py
+++ b/nanobot/agent/loop.py
@@ -213,6 +213,7 @@ class AgentLoop:
tool_hint_str = self._tool_hint(response.tool_calls)
if combined_thoughts:
+ await on_progress(combined_thoughts)
await on_progress(f"{combined_thoughts}\n\n{tool_hint_str}", tool_hint=True)
else:
await on_progress(tool_hint_str, tool_hint=True)
From 97522bfa0309931e53782ed7a4e2cfdc470853c4 Mon Sep 17 00:00:00 2001
From: coldxiangyu
Date: Thu, 5 Mar 2026 17:27:17 +0800
Subject: [PATCH 13/28] fix(feishu): isolate lark ws Client event loop from
main asyncio loop
Commit 0209ad5 moved `import lark_oapi as lark` inside the start()
method (lazy import) to suppress DeprecationWarnings. This had an
unintended side effect: the import now happens after the main asyncio
loop is already running, so lark_oapi's module-level
loop = asyncio.get_event_loop()
captures the running main loop. When the WebSocket thread then calls
loop.run_until_complete() inside Client.start(), Python raises:
RuntimeError: This event loop is already running
and the _connect/_disconnect coroutines are never awaited.
Fix: in run_ws(), create a fresh event loop with asyncio.new_event_loop(),
set it as the thread's current loop, and patch lark_oapi.ws.client.loop
to point to this dedicated loop before calling Client.start(). The loop
is closed on thread exit.
Co-authored-by: factory-droid[bot] <138933559+factory-droid[bot]@users.noreply.github.com>
---
nanobot/channels/feishu.py | 30 +++++++++++++++++++++---------
1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/nanobot/channels/feishu.py b/nanobot/channels/feishu.py
index 0cd84c3..fcb70a8 100644
--- a/nanobot/channels/feishu.py
+++ b/nanobot/channels/feishu.py
@@ -290,16 +290,28 @@ class FeishuChannel(BaseChannel):
log_level=lark.LogLevel.INFO
)
- # Start WebSocket client in a separate thread with reconnect loop
+ # Start WebSocket client in a separate thread with reconnect loop.
+ # A dedicated event loop is created for this thread so that lark_oapi's
+ # module-level `loop = asyncio.get_event_loop()` picks up an idle loop
+ # instead of the already-running main asyncio loop, which would cause
+ # "This event loop is already running" errors.
def run_ws():
- while self._running:
- try:
- self._ws_client.start()
- except Exception as e:
- logger.warning("Feishu WebSocket error: {}", e)
- if self._running:
- import time
- time.sleep(5)
+ import time
+ import lark_oapi.ws.client as _lark_ws_client
+ ws_loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(ws_loop)
+ # Patch the module-level loop used by lark's ws Client.start()
+ _lark_ws_client.loop = ws_loop
+ try:
+ while self._running:
+ try:
+ self._ws_client.start()
+ except Exception as e:
+ logger.warning("Feishu WebSocket error: {}", e)
+ if self._running:
+ time.sleep(5)
+ finally:
+ ws_loop.close()
self._ws_thread = threading.Thread(target=run_ws, daemon=True)
self._ws_thread.start()
From 46192fbd2abe922390be1961819a86dc75c74321 Mon Sep 17 00:00:00 2001
From: coldxiangyu
Date: Thu, 5 Mar 2026 20:18:13 +0800
Subject: [PATCH 14/28] fix(context): detect image MIME type from magic bytes
instead of file extension
Feishu downloads images with incorrect extensions (e.g. .jpg for PNG files).
mimetypes.guess_type() relies on the file extension, causing a MIME mismatch
that Anthropic rejects with 'image was specified using image/jpeg but appears
to be image/png'.
Fix: read the first bytes of the image data and detect the real MIME type via
magic bytes (PNG: 0x89PNG, JPEG: 0xFFD8FF, GIF: GIF87a/GIF89a, WEBP: RIFF+WEBP).
Fall back to mimetypes.guess_type() only when magic bytes are inconclusive.
---
nanobot/agent/context.py | 23 ++++++++++++++++++++---
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/nanobot/agent/context.py b/nanobot/agent/context.py
index df4825f..7ead317 100644
--- a/nanobot/agent/context.py
+++ b/nanobot/agent/context.py
@@ -12,6 +12,19 @@ from nanobot.agent.memory import MemoryStore
from nanobot.agent.skills import SkillsLoader
+def _detect_image_mime(data: bytes) -> str | None:
+ """Detect image MIME type from magic bytes, ignoring file extension."""
+ if data[:8] == b"\x89PNG\r\n\x1a\n":
+ return "image/png"
+ if data[:3] == b"\xff\xd8\xff":
+ return "image/jpeg"
+ if data[:6] in (b"GIF87a", b"GIF89a"):
+ return "image/gif"
+ if data[:4] == b"RIFF" and data[8:12] == b"WEBP":
+ return "image/webp"
+ return None
+
+
class ContextBuilder:
"""Builds the context (system prompt + messages) for the agent."""
@@ -136,10 +149,14 @@ Reply directly with text for conversations. Only use the 'message' tool to send
images = []
for path in media:
p = Path(path)
- mime, _ = mimetypes.guess_type(path)
- if not p.is_file() or not mime or not mime.startswith("image/"):
+ if not p.is_file():
continue
- b64 = base64.b64encode(p.read_bytes()).decode()
+ raw = p.read_bytes()
+ # Detect real MIME type from magic bytes; fallback to filename guess
+ mime = _detect_image_mime(raw) or mimetypes.guess_type(path)[0]
+ if not mime or not mime.startswith("image/"):
+ continue
+ b64 = base64.b64encode(raw).decode()
images.append({"type": "image_url", "image_url": {"url": f"data:{mime};base64,{b64}"}})
if not images:
From fb77176cfd41b50b3495ffa99cfc22bb6cbd4ed1 Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Thu, 5 Mar 2026 14:25:46 +0000
Subject: [PATCH 15/28] feat(custom-provider): keep instance-level session
affinity header for cache locality
---
nanobot/providers/custom_provider.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/nanobot/providers/custom_provider.py b/nanobot/providers/custom_provider.py
index 02183f3..66df734 100644
--- a/nanobot/providers/custom_provider.py
+++ b/nanobot/providers/custom_provider.py
@@ -16,6 +16,7 @@ class CustomProvider(LLMProvider):
def __init__(self, api_key: str = "no-key", api_base: str = "http://localhost:8000/v1", default_model: str = "default"):
super().__init__(api_key, api_base)
self.default_model = default_model
+ # Keep affinity stable for this provider instance to improve backend cache locality.
self._client = AsyncOpenAI(
api_key=api_key,
base_url=api_base,
From 06fcd2cc3fed18667672f638a6c7cc54f8d5f736 Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Thu, 5 Mar 2026 14:33:14 +0000
Subject: [PATCH 16/28] fix(discord): correct group_policy default to mention
and style cleanup
---
nanobot/channels/discord.py | 6 ++----
nanobot/config/schema.py | 2 +-
2 files changed, 3 insertions(+), 5 deletions(-)
diff --git a/nanobot/channels/discord.py b/nanobot/channels/discord.py
index 85ff28a..900c17b 100644
--- a/nanobot/channels/discord.py
+++ b/nanobot/channels/discord.py
@@ -174,7 +174,7 @@ class DiscordChannel(BaseChannel):
# Capture bot user ID for mention detection
user_data = payload.get("user") or {}
self._bot_user_id = user_data.get("id")
- logger.info(f"Discord bot connected as user {self._bot_user_id}")
+ logger.info("Discord bot connected as user {}", self._bot_user_id)
elif op == 0 and event_type == "MESSAGE_CREATE":
await self._handle_message_create(payload)
elif op == 7:
@@ -287,8 +287,6 @@ class DiscordChannel(BaseChannel):
def _should_respond_in_group(self, payload: dict[str, Any], content: str) -> bool:
"""Check if bot should respond in a group channel based on policy."""
- channel_id = str(payload.get("channel_id", ""))
-
if self.config.group_policy == "open":
return True
@@ -303,7 +301,7 @@ class DiscordChannel(BaseChannel):
# Also check content for mention format <@USER_ID>
if f"<@{self._bot_user_id}>" in content or f"<@!{self._bot_user_id}>" in content:
return True
- logger.debug(f"Discord message in {channel_id} ignored (bot not mentioned)")
+ logger.debug("Discord message in {} ignored (bot not mentioned)", payload.get("channel_id"))
return False
return True
diff --git a/nanobot/config/schema.py b/nanobot/config/schema.py
index a6b609b..9d7da3b 100644
--- a/nanobot/config/schema.py
+++ b/nanobot/config/schema.py
@@ -62,7 +62,7 @@ class DiscordConfig(Base):
allow_from: list[str] = Field(default_factory=list) # Allowed user IDs
gateway_url: str = "wss://gateway.discord.gg/?v=10&encoding=json"
intents: int = 37377 # GUILDS + GUILD_MESSAGES + DIRECT_MESSAGES + MESSAGE_CONTENT
- group_policy: str = "open" # "mention" or "open"
+ group_policy: Literal["mention", "open"] = "mention"
class MatrixConfig(Base):
From b71c1bdca7dd0aa6323d7b8074bf4be25aa44a9b Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Thu, 5 Mar 2026 14:44:45 +0000
Subject: [PATCH 17/28] fix(mcp): hoist sse/http imports, annotate
auto-detection heuristic, restore field comments
---
README.md | 4 ++--
nanobot/agent/tools/mcp.py | 6 +++---
nanobot/config/schema.py | 14 +++++++-------
3 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/README.md b/README.md
index 6c9304d..5bc70b8 100644
--- a/README.md
+++ b/README.md
@@ -14,9 +14,9 @@
🐈 **nanobot** is an **ultra-lightweight** personal AI assistant inspired by [OpenClaw](https://github.com/openclaw/openclaw)
-⚡️ Delivers core agent functionality in just **~4,000** lines of code — **99% smaller** than Clawdbot's 430k+ lines.
+⚡️ Delivers core agent functionality with **99% fewer lines of code** than OpenClaw, making it more customizable and understandable.
-📏 Real-time line count: **3,935 lines** (run `bash core_agent_lines.sh` to verify anytime)
+📏 Real-time line count: run `bash core_agent_lines.sh` to verify anytime
## 📢 News
diff --git a/nanobot/agent/tools/mcp.py b/nanobot/agent/tools/mcp.py
index 151aa55..2cbffd0 100644
--- a/nanobot/agent/tools/mcp.py
+++ b/nanobot/agent/tools/mcp.py
@@ -58,7 +58,9 @@ async def connect_mcp_servers(
) -> None:
"""Connect to configured MCP servers and register their tools."""
from mcp import ClientSession, StdioServerParameters
+ from mcp.client.sse import sse_client
from mcp.client.stdio import stdio_client
+ from mcp.client.streamable_http import streamable_http_client
for name, cfg in mcp_servers.items():
try:
@@ -67,6 +69,7 @@ async def connect_mcp_servers(
if cfg.command:
transport_type = "stdio"
elif cfg.url:
+ # Convention: URLs ending with /sse use SSE transport; others use streamableHttp
transport_type = (
"sse" if cfg.url.rstrip("/").endswith("/sse") else "streamableHttp"
)
@@ -80,8 +83,6 @@ async def connect_mcp_servers(
)
read, write = await stack.enter_async_context(stdio_client(params))
elif transport_type == "sse":
- from mcp.client.sse import sse_client
-
def httpx_client_factory(
headers: dict[str, str] | None = None,
timeout: httpx.Timeout | None = None,
@@ -99,7 +100,6 @@ async def connect_mcp_servers(
sse_client(cfg.url, httpx_client_factory=httpx_client_factory)
)
elif transport_type == "streamableHttp":
- from mcp.client.streamable_http import streamable_http_client
# Always provide an explicit httpx client so MCP HTTP transport does not
# inherit httpx's default 5s timeout and preempt the higher-level tool timeout.
http_client = await stack.enter_async_context(
diff --git a/nanobot/config/schema.py b/nanobot/config/schema.py
index 9f2e5b3..1f2f946 100644
--- a/nanobot/config/schema.py
+++ b/nanobot/config/schema.py
@@ -329,13 +329,13 @@ class ExecToolConfig(Base):
class MCPServerConfig(Base):
"""MCP server connection configuration (stdio or HTTP)."""
- type: Literal["stdio", "sse", "streamableHttp"] | None = None
- command: str = ""
- args: list[str] = Field(default_factory=list)
- env: dict[str, str] = Field(default_factory=dict)
- url: str = ""
- headers: dict[str, str] = Field(default_factory=dict)
- tool_timeout: int = 30
+ type: Literal["stdio", "sse", "streamableHttp"] | None = None # auto-detected if omitted
+ command: str = "" # Stdio: command to run (e.g. "npx")
+ args: list[str] = Field(default_factory=list) # Stdio: command arguments
+ env: dict[str, str] = Field(default_factory=dict) # Stdio: extra env vars
+ url: str = "" # HTTP/SSE: endpoint URL
+ headers: dict[str, str] = Field(default_factory=dict) # HTTP/SSE: custom headers
+ tool_timeout: int = 30 # seconds before a tool call is cancelled
class ToolsConfig(Base):
From 57d8aefc2289144339640be677d5d4e3edfdcb6f Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Thu, 5 Mar 2026 14:46:03 +0000
Subject: [PATCH 18/28] docs: update introduction of nanobot
---
README.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index 5bc70b8..4c5e9a6 100644
--- a/README.md
+++ b/README.md
@@ -12,11 +12,11 @@
-🐈 **nanobot** is an **ultra-lightweight** personal AI assistant inspired by [OpenClaw](https://github.com/openclaw/openclaw)
+🐈 **nanobot** is an **ultra-lightweight** personal AI assistant inspired by [OpenClaw](https://github.com/openclaw/openclaw).
⚡️ Delivers core agent functionality with **99% fewer lines of code** than OpenClaw, making it more customizable and understandable.
-📏 Real-time line count: run `bash core_agent_lines.sh` to verify anytime
+📏 Real-time line count: run `bash core_agent_lines.sh` to verify anytime.
## 📢 News
From cd0bcc162e5a742e452918c4835384774d7a7938 Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Thu, 5 Mar 2026 14:48:57 +0000
Subject: [PATCH 19/28] docs: update introduction of nanobot
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 4c5e9a6..1374fb8 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@
🐈 **nanobot** is an **ultra-lightweight** personal AI assistant inspired by [OpenClaw](https://github.com/openclaw/openclaw).
-⚡️ Delivers core agent functionality with **99% fewer lines of code** than OpenClaw, making it more customizable and understandable.
+⚡️ Delivers core agent functionality with **99% fewer lines of code** than OpenClaw.
📏 Real-time line count: run `bash core_agent_lines.sh` to verify anytime.
From 0343d66224007d6d7964984db7741ae710c81167 Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Thu, 5 Mar 2026 14:54:53 +0000
Subject: [PATCH 20/28] fix(gateway): remove duplicate load_config() that
overwrote custom workspace/config
---
nanobot/cli/commands.py | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/nanobot/cli/commands.py b/nanobot/cli/commands.py
index 05e2cbe..b097059 100644
--- a/nanobot/cli/commands.py
+++ b/nanobot/cli/commands.py
@@ -244,7 +244,7 @@ def _make_provider(config: Config):
@app.command()
def gateway(
port: int = typer.Option(18790, "--port", "-p", help="Gateway port"),
- workspace: str | None = typer.Option(None, "--workspace", "-w", help="Workspace directory (default: ~/.nanobot/workspace)"),
+ workspace: str | None = typer.Option(None, "--workspace", "-w", help="Workspace directory"),
config: str | None = typer.Option(None, "--config", "-c", help="Config file path"),
verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"),
):
@@ -252,7 +252,7 @@ def gateway(
from nanobot.agent.loop import AgentLoop
from nanobot.bus.queue import MessageBus
from nanobot.channels.manager import ChannelManager
- from nanobot.config.loader import get_data_dir, load_config
+ from nanobot.config.loader import load_config
from nanobot.cron.service import CronService
from nanobot.cron.types import CronJob
from nanobot.heartbeat.service import HeartbeatService
@@ -262,17 +262,12 @@ def gateway(
import logging
logging.basicConfig(level=logging.DEBUG)
- # Load config from custom path if provided, otherwise use default
config_path = Path(config) if config else None
config = load_config(config_path)
-
- # Override workspace if specified via command line
if workspace:
config.agents.defaults.workspace = workspace
console.print(f"{__logo__} Starting nanobot gateway on port {port}...")
-
- config = load_config()
sync_workspace_templates(config.workspace_path)
bus = MessageBus()
provider = _make_provider(config)
From d32c6f946c5fd030ddfbbb645adb43b84a43d6ed Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Thu, 5 Mar 2026 15:17:30 +0000
Subject: [PATCH 21/28] fix(telegram): pin ptb>=22.6, fix double progress,
clean up stale hatch config
---
nanobot/agent/loop.py | 14 ++++++--------
nanobot/channels/telegram.py | 2 +-
pyproject.toml | 5 +----
3 files changed, 8 insertions(+), 13 deletions(-)
diff --git a/nanobot/agent/loop.py b/nanobot/agent/loop.py
index 2f6a2bc..7f129a2 100644
--- a/nanobot/agent/loop.py
+++ b/nanobot/agent/loop.py
@@ -205,18 +205,16 @@ class AgentLoop:
thoughts = [
self._strip_think(response.content),
response.reasoning_content,
- *(f"Thinking [{b.get('signature', '...')}]:\n{b.get('thought', '...')}"
- for b in (response.thinking_blocks or []) if isinstance(b, dict) and "signature" in b)
+ *(
+ f"Thinking [{b.get('signature', '...')}]:\n{b.get('thought', '...')}"
+ for b in (response.thinking_blocks or [])
+ if isinstance(b, dict) and "signature" in b
+ ),
]
-
combined_thoughts = "\n\n".join(filter(None, thoughts))
- tool_hint_str = self._tool_hint(response.tool_calls)
-
if combined_thoughts:
await on_progress(combined_thoughts)
- await on_progress(f"{combined_thoughts}\n\n{tool_hint_str}", tool_hint=True)
- else:
- await on_progress(tool_hint_str, tool_hint=True)
+ await on_progress(self._tool_hint(response.tool_calls), tool_hint=True)
tool_call_dicts = [
{
diff --git a/nanobot/channels/telegram.py b/nanobot/channels/telegram.py
index de95a15..884b2d0 100644
--- a/nanobot/channels/telegram.py
+++ b/nanobot/channels/telegram.py
@@ -292,7 +292,7 @@ class TelegramChannel(BaseChannel):
reply_parameters=reply_params
)
except Exception as e:
- logger.warning("HTML parse failed (or draft send failed), falling back to plain text: {}", e)
+ logger.warning("HTML parse failed, falling back to plain text: {}", e)
try:
if is_progress and draft_id:
await self._app.bot.send_message_draft(
diff --git a/pyproject.toml b/pyproject.toml
index 674a1ef..41d0fbb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -30,7 +30,7 @@ dependencies = [
"rich>=14.0.0,<15.0.0",
"croniter>=6.0.0,<7.0.0",
"dingtalk-stream>=0.24.0,<1.0.0",
- "python-telegram-bot[socks]>=22.0,<23.0",
+ "python-telegram-bot[socks]>=22.6,<23.0",
"lark-oapi>=1.5.0,<2.0.0",
"socksio>=1.0.0,<2.0.0",
"python-socketio>=5.16.0,<6.0.0",
@@ -68,9 +68,6 @@ nanobot = "nanobot.cli.commands:app"
requires = ["hatchling"]
build-backend = "hatchling.build"
-[tool.hatch.metadata]
-allow-direct-references = true
-
[tool.hatch.build.targets.wheel]
packages = ["nanobot"]
From 5ced08b1f23f5ef275465fbe3140f64d42c95ced Mon Sep 17 00:00:00 2001
From: pikaqqqqqq
Date: Fri, 6 Mar 2026 01:54:00 +0800
Subject: [PATCH 22/28] fix(feishu): use msg_type "media" for mp4 video files
Previously, mp4 video files were sent with msg_type "file", which meant
users had to download them to play. Feishu requires msg_type "media" for
audio and video files to enable inline playback in the chat.
Changes:
- Add _VIDEO_EXTS constant for video file extensions (.mp4, .mov, .avi)
- Use msg_type "media" for both audio (_AUDIO_EXTS) and video (_VIDEO_EXTS)
- Keep msg_type "file" for documents and other file types
The upload_file API already uses file_type="mp4" for video files via the
existing _FILE_TYPE_MAP, so only the send msg_type needed fixing.
---
nanobot/channels/feishu.py | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/nanobot/channels/feishu.py b/nanobot/channels/feishu.py
index e6f0049..3847ac1 100644
--- a/nanobot/channels/feishu.py
+++ b/nanobot/channels/feishu.py
@@ -474,6 +474,7 @@ class FeishuChannel(BaseChannel):
_IMAGE_EXTS = {".png", ".jpg", ".jpeg", ".gif", ".bmp", ".webp", ".ico", ".tiff", ".tif"}
_AUDIO_EXTS = {".opus"}
+ _VIDEO_EXTS = {".mp4", ".mov", ".avi"}
_FILE_TYPE_MAP = {
".opus": "opus", ".mp4": "mp4", ".pdf": "pdf", ".doc": "doc", ".docx": "doc",
".xls": "xls", ".xlsx": "xls", ".ppt": "ppt", ".pptx": "ppt",
@@ -682,7 +683,12 @@ class FeishuChannel(BaseChannel):
else:
key = await loop.run_in_executor(None, self._upload_file_sync, file_path)
if key:
- media_type = "audio" if ext in self._AUDIO_EXTS else "file"
+ # Use msg_type "media" for audio/video so users can play inline;
+ # "file" for everything else (documents, archives, etc.)
+ if ext in self._AUDIO_EXTS or ext in self._VIDEO_EXTS:
+ media_type = "media"
+ else:
+ media_type = "file"
await loop.run_in_executor(
None, self._send_message_sync,
receive_id_type, msg.chat_id, media_type, json.dumps({"file_key": key}, ensure_ascii=False),
From 9ab4155991627e45dd2c88b028d35c55b82ecce9 Mon Sep 17 00:00:00 2001
From: nanobot-contributor
Date: Fri, 6 Mar 2026 09:57:03 +0800
Subject: [PATCH 23/28] fix(cli): add Windows compatibility for signal handlers
(PR #1400)
SIGHUP and SIGPIPE are not available on Windows. Add hasattr() checks
before registering these signal handlers to prevent AttributeError on
Windows systems.
Fixes compatibility issue introduced in PR #1400.
---
nanobot/cli/commands.py | 20 ++++++++++++++++++--
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/nanobot/cli/commands.py b/nanobot/cli/commands.py
index aca0778..eb3d833 100644
--- a/nanobot/cli/commands.py
+++ b/nanobot/cli/commands.py
@@ -7,6 +7,18 @@ import signal
import sys
from pathlib import Path
+# Force UTF-8 encoding for Windows console
+if sys.platform == "win32":
+ import locale
+ if sys.stdout.encoding != "utf-8":
+ os.environ["PYTHONIOENCODING"] = "utf-8"
+ # Re-open stdout/stderr with UTF-8 encoding
+ try:
+ sys.stdout.reconfigure(encoding="utf-8", errors="replace")
+ sys.stderr.reconfigure(encoding="utf-8", errors="replace")
+ except Exception:
+ pass
+
import typer
from prompt_toolkit import PromptSession
from prompt_toolkit.formatted_text import HTML
@@ -525,9 +537,13 @@ def agent(
signal.signal(signal.SIGINT, _handle_signal)
signal.signal(signal.SIGTERM, _handle_signal)
- signal.signal(signal.SIGHUP, _handle_signal)
+ # SIGHUP is not available on Windows
+ if hasattr(signal, 'SIGHUP'):
+ signal.signal(signal.SIGHUP, _handle_signal)
# Ignore SIGPIPE to prevent silent process termination when writing to closed pipes
- signal.signal(signal.SIGPIPE, signal.SIG_IGN)
+ # SIGPIPE is not available on Windows
+ if hasattr(signal, 'SIGPIPE'):
+ signal.signal(signal.SIGPIPE, signal.SIG_IGN)
async def run_interactive():
bus_task = asyncio.create_task(agent_loop.run())
From c3526a7fdb2418d68c03d34db5ee43b624edbce9 Mon Sep 17 00:00:00 2001
From: PiKaqqqqqq <281705236@qq.com>
Date: Fri, 6 Mar 2026 10:11:53 +0800
Subject: [PATCH 24/28] fix(feishu): smart message format selection (fixes
#1548)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Instead of always sending interactive cards, detect the optimal
message format based on content:
- text: short plain text (≤200 chars, no markdown)
- post: medium text with links (≤2000 chars)
- interactive: complex content (code, tables, headings, bold, lists)
---
nanobot/channels/feishu.py | 143 +++++++++++++++++++++++++++++++++++--
pr-description.md | 47 ++++++++++++
2 files changed, 186 insertions(+), 4 deletions(-)
create mode 100644 pr-description.md
diff --git a/nanobot/channels/feishu.py b/nanobot/channels/feishu.py
index e6f0049..c405493 100644
--- a/nanobot/channels/feishu.py
+++ b/nanobot/channels/feishu.py
@@ -472,6 +472,121 @@ class FeishuChannel(BaseChannel):
return elements or [{"tag": "markdown", "content": content}]
+ # ── Smart format detection ──────────────────────────────────────────
+ # Patterns that indicate "complex" markdown needing card rendering
+ _COMPLEX_MD_RE = re.compile(
+ r"```" # fenced code block
+ r"|^\|.+\|.*\n\s*\|[-:\s|]+\|" # markdown table (header + separator)
+ r"|^#{1,6}\s+" # headings
+ , re.MULTILINE,
+ )
+
+ # Simple markdown patterns (bold, italic, strikethrough)
+ _SIMPLE_MD_RE = re.compile(
+ r"\*\*.+?\*\*" # **bold**
+ r"|__.+?__" # __bold__
+        r"|~~.+?~~"         # ~~strikethrough~~
+        r"|(?<!\*)\*[^*\n]+?\*(?!\*)"  # *italic*
+        ,
+    )
+
+    # List items at line start (unordered / ordered)
+    _LIST_RE = re.compile(r"^\s*[-*+]\s+", re.MULTILINE)
+    _OLIST_RE = re.compile(r"^\s*\d+\.\s+", re.MULTILINE)
+
+    # Markdown links: [text](url)
+    _MD_LINK_RE = re.compile(r"\[([^\]]+)\]\(([^)]+)\)")
+
+    _TEXT_MAX_LEN = 200    # max length for plain "text" messages
+    _POST_MAX_LEN = 2000   # max length for rich-text "post" messages
+
+    @classmethod
+    def _detect_msg_format(cls, content: str) -> str:
+ """Determine the optimal Feishu message format for *content*.
+
+ Returns one of:
+ - ``"text"`` – plain text, short and no markdown
+ - ``"post"`` – rich text (links only, moderate length)
+ - ``"interactive"`` – card with full markdown rendering
+ """
+ stripped = content.strip()
+
+ # Complex markdown (code blocks, tables, headings) → always card
+ if cls._COMPLEX_MD_RE.search(stripped):
+ return "interactive"
+
+ # Long content → card (better readability with card layout)
+ if len(stripped) > cls._POST_MAX_LEN:
+ return "interactive"
+
+ # Has bold/italic/strikethrough → card (post format can't render these)
+ if cls._SIMPLE_MD_RE.search(stripped):
+ return "interactive"
+
+ # Has list items → card (post format can't render list bullets well)
+ if cls._LIST_RE.search(stripped) or cls._OLIST_RE.search(stripped):
+ return "interactive"
+
+ # Has links → post format (supports tags)
+ if cls._MD_LINK_RE.search(stripped):
+ return "post"
+
+ # Short plain text → text format
+ if len(stripped) <= cls._TEXT_MAX_LEN:
+ return "text"
+
+ # Medium plain text without any formatting → post format
+ return "post"
+
+ @classmethod
+ def _markdown_to_post(cls, content: str) -> str:
+ """Convert markdown content to Feishu post message JSON.
+
+ Handles links ``[text](url)`` as ``a`` tags; everything else as ``text`` tags.
+ Each line becomes a paragraph (row) in the post body.
+ """
+ lines = content.strip().split("\n")
+ paragraphs: list[list[dict]] = []
+
+ for line in lines:
+ elements: list[dict] = []
+ last_end = 0
+
+ for m in cls._MD_LINK_RE.finditer(line):
+ # Text before this link
+ before = line[last_end:m.start()]
+ if before:
+ elements.append({"tag": "text", "text": before})
+ elements.append({
+ "tag": "a",
+ "text": m.group(1),
+ "href": m.group(2),
+ })
+ last_end = m.end()
+
+ # Remaining text after last link
+ remaining = line[last_end:]
+ if remaining:
+ elements.append({"tag": "text", "text": remaining})
+
+ # Empty line → empty paragraph for spacing
+ if not elements:
+ elements.append({"tag": "text", "text": ""})
+
+ paragraphs.append(elements)
+
+ post_body = {
+ "zh_cn": {
+ "content": paragraphs,
+ }
+ }
+ return json.dumps(post_body, ensure_ascii=False)
+
_IMAGE_EXTS = {".png", ".jpg", ".jpeg", ".gif", ".bmp", ".webp", ".ico", ".tiff", ".tif"}
_AUDIO_EXTS = {".opus"}
_FILE_TYPE_MAP = {
@@ -689,14 +804,34 @@ class FeishuChannel(BaseChannel):
)
if msg.content and msg.content.strip():
- elements = self._build_card_elements(msg.content)
- for chunk in self._split_elements_by_table_limit(elements):
- card = {"config": {"wide_screen_mode": True}, "elements": chunk}
+ fmt = self._detect_msg_format(msg.content)
+
+ if fmt == "text":
+ # Short plain text – send as simple text message
+ text_body = json.dumps({"text": msg.content.strip()}, ensure_ascii=False)
await loop.run_in_executor(
None, self._send_message_sync,
- receive_id_type, msg.chat_id, "interactive", json.dumps(card, ensure_ascii=False),
+ receive_id_type, msg.chat_id, "text", text_body,
)
+ elif fmt == "post":
+ # Medium content with links – send as rich-text post
+ post_body = self._markdown_to_post(msg.content)
+ await loop.run_in_executor(
+ None, self._send_message_sync,
+ receive_id_type, msg.chat_id, "post", post_body,
+ )
+
+ else:
+ # Complex / long content – send as interactive card
+ elements = self._build_card_elements(msg.content)
+ for chunk in self._split_elements_by_table_limit(elements):
+ card = {"config": {"wide_screen_mode": True}, "elements": chunk}
+ await loop.run_in_executor(
+ None, self._send_message_sync,
+ receive_id_type, msg.chat_id, "interactive", json.dumps(card, ensure_ascii=False),
+ )
+
except Exception as e:
logger.error("Error sending Feishu message: {}", e)
diff --git a/pr-description.md b/pr-description.md
new file mode 100644
index 0000000..dacab5c
--- /dev/null
+++ b/pr-description.md
@@ -0,0 +1,47 @@
+## fix(feishu): smart message format selection (fixes #1548)
+
+### Problem
+
+Currently, the Feishu channel sends **all** messages as interactive cards (`msg_type: "interactive"`). This is overkill for short, simple replies like "OK" or "收到" — they look heavy and unnatural compared to normal chat messages.
+
+### Solution
+
+Implement smart message format selection that picks the most appropriate Feishu message type based on content analysis:
+
+| Content Type | Format | `msg_type` |
+|---|---|---|
+| Short plain text (≤ 200 chars, no markdown) | Text | `text` |
+| Medium text with links (≤ 2000 chars, no complex formatting) | Rich Text Post | `post` |
+| Long text, code blocks, tables, headings, bold/italic, lists | Interactive Card | `interactive` |
+
+### How it works
+
+1. **`_detect_msg_format(content)`** — Analyzes the message content and returns the optimal format:
+ - Checks for complex markdown (code blocks, tables, headings) → `interactive`
+ - Checks for simple markdown (bold, italic, lists) → `interactive`
+   - Checks for links → `post` (Feishu post format supports `a` tags natively)
+ - Short plain text → `text`
+ - Medium plain text → `post`
+
+2. **`_markdown_to_post(content)`** — Converts markdown links `[text](url)` to Feishu post format with proper `a` tags. Each line becomes a paragraph in the post body.
+
+3. **Modified `send()` method** — Uses `_detect_msg_format()` to choose the right format, then dispatches to the appropriate sending logic.
+
+### Design decisions
+
+- **Post format for links only**: Feishu's post format (`[[{"tag":"text",...}]]`) doesn't support bold/italic rendering, so we only use it for messages containing links (where the `a` tag adds real value). Messages with bold/italic/lists still use cards which render markdown properly.
+- **Conservative thresholds**: 200 chars for text, 2000 chars for post — these keep the UX natural without being too aggressive.
+- **Backward compatible**: The card rendering path is completely unchanged. Only the routing logic is new.
+
+### Testing
+
+Format detection tested against 13 cases covering all content types:
+- ✅ Plain text → `text`
+- ✅ Links → `post`
+- ✅ Bold/italic/code/tables/headings/lists → `interactive`
+- ✅ Long content → `interactive`
+- ✅ Post format generates valid Feishu post JSON with proper `a` tags
+
+### Changes
+
+- `nanobot/channels/feishu.py`: Added `_detect_msg_format()`, `_markdown_to_post()`, and updated `send()` method
From 6fb4204ac6a5109a4ff068a17975615498c40c05 Mon Sep 17 00:00:00 2001
From: nanobot-contributor
Date: Fri, 6 Mar 2026 11:47:00 +0800
Subject: [PATCH 25/28] fix(memory): handle list type tool call arguments
Some LLM providers return tool_calls[0].arguments as a list instead of
dict or str. Add handling to extract the first dict element from the list.
Fixes /new command warning: 'unexpected arguments type list'
---
nanobot/agent/memory.py | 7 +++
tests/test_memory_consolidation_types.py | 75 ++++++++++++++++++++++++
2 files changed, 82 insertions(+)
diff --git a/nanobot/agent/memory.py b/nanobot/agent/memory.py
index 93c1825..80fba5e 100644
--- a/nanobot/agent/memory.py
+++ b/nanobot/agent/memory.py
@@ -128,6 +128,13 @@ class MemoryStore:
# Some providers return arguments as a JSON string instead of dict
if isinstance(args, str):
args = json.loads(args)
+ # Some providers return arguments as a list (handle edge case)
+ if isinstance(args, list):
+ if args and isinstance(args[0], dict):
+ args = args[0]
+ else:
+ logger.warning("Memory consolidation: unexpected arguments type list with non-dict content")
+ return False
if not isinstance(args, dict):
logger.warning("Memory consolidation: unexpected arguments type {}", type(args).__name__)
return False
diff --git a/tests/test_memory_consolidation_types.py b/tests/test_memory_consolidation_types.py
index 375c802..ff15584 100644
--- a/tests/test_memory_consolidation_types.py
+++ b/tests/test_memory_consolidation_types.py
@@ -145,3 +145,78 @@ class TestMemoryConsolidationTypeHandling:
assert result is True
provider.chat.assert_not_called()
+
+ @pytest.mark.asyncio
+ async def test_list_arguments_extracts_first_dict(self, tmp_path: Path) -> None:
+ """Some providers return arguments as a list - extract first element if it's a dict."""
+ store = MemoryStore(tmp_path)
+ provider = AsyncMock()
+
+ # Simulate arguments being a list containing a dict
+ response = LLMResponse(
+ content=None,
+ tool_calls=[
+ ToolCallRequest(
+ id="call_1",
+ name="save_memory",
+ arguments=[{
+ "history_entry": "[2026-01-01] User discussed testing.",
+ "memory_update": "# Memory\nUser likes testing.",
+ }],
+ )
+ ],
+ )
+ provider.chat = AsyncMock(return_value=response)
+ session = _make_session(message_count=60)
+
+ result = await store.consolidate(session, provider, "test-model", memory_window=50)
+
+ assert result is True
+ assert "User discussed testing." in store.history_file.read_text()
+ assert "User likes testing." in store.memory_file.read_text()
+
+ @pytest.mark.asyncio
+ async def test_list_arguments_empty_list_returns_false(self, tmp_path: Path) -> None:
+ """Empty list arguments should return False."""
+ store = MemoryStore(tmp_path)
+ provider = AsyncMock()
+
+ response = LLMResponse(
+ content=None,
+ tool_calls=[
+ ToolCallRequest(
+ id="call_1",
+ name="save_memory",
+ arguments=[],
+ )
+ ],
+ )
+ provider.chat = AsyncMock(return_value=response)
+ session = _make_session(message_count=60)
+
+ result = await store.consolidate(session, provider, "test-model", memory_window=50)
+
+ assert result is False
+
+ @pytest.mark.asyncio
+ async def test_list_arguments_non_dict_content_returns_false(self, tmp_path: Path) -> None:
+ """List with non-dict content should return False."""
+ store = MemoryStore(tmp_path)
+ provider = AsyncMock()
+
+ response = LLMResponse(
+ content=None,
+ tool_calls=[
+ ToolCallRequest(
+ id="call_1",
+ name="save_memory",
+ arguments=["string", "content"],
+ )
+ ],
+ )
+ provider.chat = AsyncMock(return_value=response)
+ session = _make_session(message_count=60)
+
+ result = await store.consolidate(session, provider, "test-model", memory_window=50)
+
+ assert result is False
From fc0b38c3047c20241c94b38f1be6138191da41f6 Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Fri, 6 Mar 2026 05:27:39 +0000
Subject: [PATCH 26/28] fix(memory): improve warning message for empty/non-dict
list arguments
---
nanobot/agent/memory.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/nanobot/agent/memory.py b/nanobot/agent/memory.py
index 80fba5e..21fe77d 100644
--- a/nanobot/agent/memory.py
+++ b/nanobot/agent/memory.py
@@ -133,7 +133,7 @@ class MemoryStore:
if args and isinstance(args[0], dict):
args = args[0]
else:
- logger.warning("Memory consolidation: unexpected arguments type list with non-dict content")
+ logger.warning("Memory consolidation: unexpected arguments as empty or non-dict list")
return False
if not isinstance(args, dict):
logger.warning("Memory consolidation: unexpected arguments type {}", type(args).__name__)
From ba63f6f62d9b2181b56863d8efe32215fe8f6321 Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Fri, 6 Mar 2026 06:09:46 +0000
Subject: [PATCH 27/28] chore: remove pr-description.md from repo
---
pr-description.md | 47 -----------------------------------------------
1 file changed, 47 deletions(-)
delete mode 100644 pr-description.md
diff --git a/pr-description.md b/pr-description.md
deleted file mode 100644
index dacab5c..0000000
--- a/pr-description.md
+++ /dev/null
@@ -1,47 +0,0 @@
-## fix(feishu): smart message format selection (fixes #1548)
-
-### Problem
-
-Currently, the Feishu channel sends **all** messages as interactive cards (`msg_type: "interactive"`). This is overkill for short, simple replies like "OK" or "收到" — they look heavy and unnatural compared to normal chat messages.
-
-### Solution
-
-Implement smart message format selection that picks the most appropriate Feishu message type based on content analysis:
-
-| Content Type | Format | `msg_type` |
-|---|---|---|
-| Short plain text (≤ 200 chars, no markdown) | Text | `text` |
-| Medium text with links (≤ 2000 chars, no complex formatting) | Rich Text Post | `post` |
-| Long text, code blocks, tables, headings, bold/italic, lists | Interactive Card | `interactive` |
-
-### How it works
-
-1. **`_detect_msg_format(content)`** — Analyzes the message content and returns the optimal format:
- - Checks for complex markdown (code blocks, tables, headings) → `interactive`
- - Checks for simple markdown (bold, italic, lists) → `interactive`
- - Checks for links → `post` (Feishu post format supports `a` tags natively)
- - Short plain text → `text`
- - Medium plain text → `post`
-
-2. **`_markdown_to_post(content)`** — Converts markdown links `[text](url)` to Feishu post format with proper `a` tags. Each line becomes a paragraph in the post body.
-
-3. **Modified `send()` method** — Uses `_detect_msg_format()` to choose the right format, then dispatches to the appropriate sending logic.
-
-### Design decisions
-
-- **Post format for links only**: Feishu's post format (`[[{"tag":"text",...}]]`) doesn't support bold/italic rendering, so we only use it for messages containing links (where the `a` tag adds real value). Messages with bold/italic/lists still use cards which render markdown properly.
-- **Conservative thresholds**: 200 chars for text, 2000 chars for post — these keep the UX natural without being too aggressive.
-- **Backward compatible**: The card rendering path is completely unchanged. Only the routing logic is new.
-
-### Testing
-
-Format detection tested against 13 cases covering all content types:
-- ✅ Plain text → `text`
-- ✅ Links → `post`
-- ✅ Bold/italic/code/tables/headings/lists → `interactive`
-- ✅ Long content → `interactive`
-- ✅ Post format generates valid Feishu post JSON with proper `a` tags
-
-### Changes
-
-- `nanobot/channels/feishu.py`: Added `_detect_msg_format()`, `_markdown_to_post()`, and updated `send()` method
From 3a01fe536a37c8424fc196b1b0aad3535a50af93 Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Fri, 6 Mar 2026 06:49:09 +0000
Subject: [PATCH 28/28] refactor: move detect_image_mime to utils/helpers for
reuse
---
nanobot/agent/context.py | 16 ++--------------
nanobot/utils/helpers.py | 13 +++++++++++++
2 files changed, 15 insertions(+), 14 deletions(-)
diff --git a/nanobot/agent/context.py b/nanobot/agent/context.py
index 7ead317..27511fa 100644
--- a/nanobot/agent/context.py
+++ b/nanobot/agent/context.py
@@ -10,19 +10,7 @@ from typing import Any
from nanobot.agent.memory import MemoryStore
from nanobot.agent.skills import SkillsLoader
-
-
-def _detect_image_mime(data: bytes) -> str | None:
- """Detect image MIME type from magic bytes, ignoring file extension."""
- if data[:8] == b"\x89PNG\r\n\x1a\n":
- return "image/png"
- if data[:3] == b"\xff\xd8\xff":
- return "image/jpeg"
- if data[:6] in (b"GIF87a", b"GIF89a"):
- return "image/gif"
- if data[:4] == b"RIFF" and data[8:12] == b"WEBP":
- return "image/webp"
- return None
+from nanobot.utils.helpers import detect_image_mime
class ContextBuilder:
@@ -153,7 +141,7 @@ Reply directly with text for conversations. Only use the 'message' tool to send
continue
raw = p.read_bytes()
# Detect real MIME type from magic bytes; fallback to filename guess
- mime = _detect_image_mime(raw) or mimetypes.guess_type(path)[0]
+ mime = detect_image_mime(raw) or mimetypes.guess_type(path)[0]
if not mime or not mime.startswith("image/"):
continue
b64 = base64.b64encode(raw).decode()
diff --git a/nanobot/utils/helpers.py b/nanobot/utils/helpers.py
index 3a8c802..b543174 100644
--- a/nanobot/utils/helpers.py
+++ b/nanobot/utils/helpers.py
@@ -5,6 +5,19 @@ from datetime import datetime
from pathlib import Path
+def detect_image_mime(data: bytes) -> str | None:
+ """Detect image MIME type from magic bytes, ignoring file extension."""
+ if data[:8] == b"\x89PNG\r\n\x1a\n":
+ return "image/png"
+ if data[:3] == b"\xff\xd8\xff":
+ return "image/jpeg"
+ if data[:6] in (b"GIF87a", b"GIF89a"):
+ return "image/gif"
+ if data[:4] == b"RIFF" and data[8:12] == b"WEBP":
+ return "image/webp"
+ return None
+
+
def ensure_dir(path: Path) -> Path:
"""Ensure directory exists, return it."""
path.mkdir(parents=True, exist_ok=True)