Merge branch 'HKUDS:main' into feat/improve-tool-validation-tests

This commit is contained in:
Barry Wang
2026-03-06 15:42:26 +08:00
committed by GitHub
18 changed files with 752 additions and 209 deletions

View File

@@ -10,6 +10,7 @@ from typing import Any
from nanobot.agent.memory import MemoryStore
from nanobot.agent.skills import SkillsLoader
from nanobot.utils.helpers import detect_image_mime
class ContextBuilder:
@@ -136,10 +137,14 @@ Reply directly with text for conversations. Only use the 'message' tool to send
images = []
for path in media:
p = Path(path)
mime, _ = mimetypes.guess_type(path)
if not p.is_file() or not mime or not mime.startswith("image/"):
if not p.is_file():
continue
b64 = base64.b64encode(p.read_bytes()).decode()
raw = p.read_bytes()
# Detect real MIME type from magic bytes; fall back to filename guess
mime = detect_image_mime(raw) or mimetypes.guess_type(path)[0]
if not mime or not mime.startswith("image/"):
continue
b64 = base64.b64encode(raw).decode()
images.append({"type": "image_url", "image_url": {"url": f"data:{mime};base64,{b64}"}})
if not images:

View File

@@ -202,9 +202,18 @@ class AgentLoop:
if response.has_tool_calls:
if on_progress:
clean = self._strip_think(response.content)
if clean:
await on_progress(clean)
thoughts = [
self._strip_think(response.content),
response.reasoning_content,
*(
f"Thinking [{b.get('signature', '...')}]:\n{b.get('thought', '...')}"
for b in (response.thinking_blocks or [])
if isinstance(b, dict) and "signature" in b
),
]
combined_thoughts = "\n\n".join(filter(None, thoughts))
if combined_thoughts:
await on_progress(combined_thoughts)
await on_progress(self._tool_hint(response.tool_calls), tool_hint=True)
tool_call_dicts = [

View File

@@ -128,6 +128,13 @@ class MemoryStore:
# Some providers return arguments as a JSON string instead of dict
if isinstance(args, str):
args = json.loads(args)
# Some providers return arguments as a list (handle edge case)
if isinstance(args, list):
if args and isinstance(args[0], dict):
args = args[0]
else:
logger.warning("Memory consolidation: unexpected arguments as empty or non-dict list")
return False
if not isinstance(args, dict):
logger.warning("Memory consolidation: unexpected arguments type {}", type(args).__name__)
return False
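The argument handling above reduces to a small normalizer. A minimal sketch for illustration only (the standalone function and its name are editorial, not part of this diff):

import json

def _normalize_tool_args(args):
    """Coerce provider tool-call arguments into a dict, or return None."""
    if isinstance(args, str):      # some providers send a JSON string
        args = json.loads(args)
    if isinstance(args, list):     # list edge case: keep the first dict, if any
        args = args[0] if args and isinstance(args[0], dict) else None
    return args if isinstance(args, dict) else None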

View File

@@ -58,17 +58,48 @@ async def connect_mcp_servers(
) -> None:
"""Connect to configured MCP servers and register their tools."""
from mcp import ClientSession, StdioServerParameters
from mcp.client.sse import sse_client
from mcp.client.stdio import stdio_client
from mcp.client.streamable_http import streamable_http_client
for name, cfg in mcp_servers.items():
try:
if cfg.command:
transport_type = cfg.type
if not transport_type:
if cfg.command:
transport_type = "stdio"
elif cfg.url:
# Convention: URLs ending with /sse use SSE transport; others use streamableHttp
transport_type = (
"sse" if cfg.url.rstrip("/").endswith("/sse") else "streamableHttp"
)
else:
logger.warning("MCP server '{}': no command or url configured, skipping", name)
continue
if transport_type == "stdio":
params = StdioServerParameters(
command=cfg.command, args=cfg.args, env=cfg.env or None
)
read, write = await stack.enter_async_context(stdio_client(params))
elif cfg.url:
from mcp.client.streamable_http import streamable_http_client
elif transport_type == "sse":
def httpx_client_factory(
headers: dict[str, str] | None = None,
timeout: httpx.Timeout | None = None,
auth: httpx.Auth | None = None,
) -> httpx.AsyncClient:
merged_headers = {**(cfg.headers or {}), **(headers or {})}
return httpx.AsyncClient(
headers=merged_headers or None,
follow_redirects=True,
timeout=timeout,
auth=auth,
)
read, write = await stack.enter_async_context(
sse_client(cfg.url, httpx_client_factory=httpx_client_factory)
)
elif transport_type == "streamableHttp":
# Always provide an explicit httpx client so MCP HTTP transport does not
# inherit httpx's default 5s timeout and preempt the higher-level tool timeout.
http_client = await stack.enter_async_context(
@@ -82,7 +113,7 @@ async def connect_mcp_servers(
streamable_http_client(cfg.url, http_client=http_client)
)
else:
logger.warning("MCP server '{}': no command or url configured, skipping", name)
logger.warning("MCP server '{}': unknown transport type '{}'", name, transport_type)
continue
session = await stack.enter_async_context(ClientSession(read, write))
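The transport selection added above reads as a small pure function; the sketch below is illustrative (the standalone name infer_transport is editorial, not part of the diff):

def infer_transport(command: str, url: str, explicit: str | None) -> str | None:
    if explicit:    # an explicit `type` in config always wins
        return explicit
    if command:     # a command implies a stdio server
        return "stdio"
    if url:         # URLs ending in /sse use SSE, everything else streamable HTTP
        return "sse" if url.rstrip("/").endswith("/sse") else "streamableHttp"
    return None     # neither command nor url: the server is skipped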

View File

@@ -13,34 +13,13 @@ from nanobot.bus.events import OutboundMessage
from nanobot.bus.queue import MessageBus
from nanobot.channels.base import BaseChannel
from nanobot.config.schema import DiscordConfig
from nanobot.utils.helpers import split_message
DISCORD_API_BASE = "https://discord.com/api/v10"
MAX_ATTACHMENT_BYTES = 20 * 1024 * 1024 # 20MB
MAX_MESSAGE_LEN = 2000 # Discord message character limit
def _split_message(content: str, max_len: int = MAX_MESSAGE_LEN) -> list[str]:
"""Split content into chunks within max_len, preferring line breaks."""
if not content:
return []
if len(content) <= max_len:
return [content]
chunks: list[str] = []
while content:
if len(content) <= max_len:
chunks.append(content)
break
cut = content[:max_len]
pos = cut.rfind('\n')
if pos <= 0:
pos = cut.rfind(' ')
if pos <= 0:
pos = max_len
chunks.append(content[:pos])
content = content[pos:].lstrip()
return chunks
class DiscordChannel(BaseChannel):
"""Discord channel using Gateway websocket."""
@@ -54,6 +33,7 @@ class DiscordChannel(BaseChannel):
self._heartbeat_task: asyncio.Task | None = None
self._typing_tasks: dict[str, asyncio.Task] = {}
self._http: httpx.AsyncClient | None = None
self._bot_user_id: str | None = None
async def start(self) -> None:
"""Start the Discord gateway connection."""
@@ -104,7 +84,7 @@ class DiscordChannel(BaseChannel):
headers = {"Authorization": f"Bot {self.config.token}"}
try:
chunks = _split_message(msg.content or "")
chunks = split_message(msg.content or "", MAX_MESSAGE_LEN)
if not chunks:
return
@@ -170,6 +150,10 @@ class DiscordChannel(BaseChannel):
await self._identify()
elif op == 0 and event_type == "READY":
logger.info("Discord gateway READY")
# Capture bot user ID for mention detection
user_data = payload.get("user") or {}
self._bot_user_id = user_data.get("id")
logger.info("Discord bot connected as user {}", self._bot_user_id)
elif op == 0 and event_type == "MESSAGE_CREATE":
await self._handle_message_create(payload)
elif op == 7:
@@ -226,6 +210,7 @@ class DiscordChannel(BaseChannel):
sender_id = str(author.get("id", ""))
channel_id = str(payload.get("channel_id", ""))
content = payload.get("content") or ""
guild_id = payload.get("guild_id")
if not sender_id or not channel_id:
return
@@ -233,6 +218,11 @@ class DiscordChannel(BaseChannel):
if not self.is_allowed(sender_id):
return
# Check group channel policy (DMs always respond if is_allowed passes)
if guild_id is not None:
if not self._should_respond_in_group(payload, content):
return
content_parts = [content] if content else []
media_paths: list[str] = []
media_dir = Path.home() / ".nanobot" / "media"
@@ -269,11 +259,32 @@ class DiscordChannel(BaseChannel):
media=media_paths,
metadata={
"message_id": str(payload.get("id", "")),
"guild_id": payload.get("guild_id"),
"guild_id": guild_id,
"reply_to": reply_to,
},
)
def _should_respond_in_group(self, payload: dict[str, Any], content: str) -> bool:
"""Check if bot should respond in a group channel based on policy."""
if self.config.group_policy == "open":
return True
if self.config.group_policy == "mention":
# Check if bot was mentioned in the message
if self._bot_user_id:
# Check mentions array
mentions = payload.get("mentions") or []
for mention in mentions:
if str(mention.get("id")) == self._bot_user_id:
return True
# Also check content for mention format <@USER_ID>
if f"<@{self._bot_user_id}>" in content or f"<@!{self._bot_user_id}>" in content:
return True
logger.debug("Discord message in {} ignored (bot not mentioned)", payload.get("channel_id"))
return False
return True
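Under the "mention" policy, either an entry in the payload's mentions array or a raw <@id>/<@!id> token in the content counts as a mention. The payloads below are hypothetical and only illustrate the decision:

# With group_policy == "mention" and self._bot_user_id == "42":
#   {"mentions": [{"id": "42"}]}, content "hello"         -> respond (mentions array)
#   {"mentions": []},             content "hi <@42>"      -> respond (inline mention token)
#   {"mentions": []},             content "hi <@!42> ok"  -> respond (nickname-mention token)
#   {"mentions": []},             content "hello"         -> ignored (debug log, no reply)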
async def _start_typing(self, channel_id: str) -> None:
"""Start periodic typing indicator for a channel."""
await self._stop_typing(channel_id)

View File

@@ -290,16 +290,28 @@ class FeishuChannel(BaseChannel):
log_level=lark.LogLevel.INFO
)
# Start WebSocket client in a separate thread with reconnect loop
# Start WebSocket client in a separate thread with reconnect loop.
# A dedicated event loop is created for this thread so that lark_oapi's
# module-level `loop = asyncio.get_event_loop()` picks up an idle loop
# instead of the already-running main asyncio loop, which would cause
# "This event loop is already running" errors.
def run_ws():
while self._running:
try:
self._ws_client.start()
except Exception as e:
logger.warning("Feishu WebSocket error: {}", e)
if self._running:
import time
time.sleep(5)
import time
import lark_oapi.ws.client as _lark_ws_client
ws_loop = asyncio.new_event_loop()
asyncio.set_event_loop(ws_loop)
# Patch the module-level loop used by lark's ws Client.start()
_lark_ws_client.loop = ws_loop
try:
while self._running:
try:
self._ws_client.start()
except Exception as e:
logger.warning("Feishu WebSocket error: {}", e)
if self._running:
time.sleep(5)
finally:
ws_loop.close()
self._ws_thread = threading.Thread(target=run_ws, daemon=True)
self._ws_thread.start()
@@ -398,6 +410,34 @@ class FeishuChannel(BaseChannel):
elements.extend(self._split_headings(remaining))
return elements or [{"tag": "markdown", "content": content}]
@staticmethod
def _split_elements_by_table_limit(elements: list[dict], max_tables: int = 1) -> list[list[dict]]:
"""Split card elements into groups with at most *max_tables* table elements each.
Feishu cards have a hard limit of one table per card (API error 11310).
When the rendered content contains multiple markdown tables each table is
placed in a separate card message so every table reaches the user.
"""
if not elements:
return [[]]
groups: list[list[dict]] = []
current: list[dict] = []
table_count = 0
for el in elements:
if el.get("tag") == "table":
if table_count >= max_tables:
if current:
groups.append(current)
current = []
table_count = 0
current.append(el)
table_count += 1
else:
current.append(el)
if current:
groups.append(current)
return groups or [[]]
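With the default max_tables=1, the splitter turns a card containing two tables into two card payloads. An illustrative run (element dicts trimmed for brevity):

elements = [
    {"tag": "markdown", "content": "intro"},
    {"tag": "table", "rows": []},
    {"tag": "markdown", "content": "between"},
    {"tag": "table", "rows": []},
]
# _split_elements_by_table_limit(elements)
#   -> [[markdown "intro", table, markdown "between"], [table]]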
def _split_headings(self, content: str) -> list[dict]:
"""Split content by headings, converting headings to div elements."""
protected = content
@@ -432,8 +472,124 @@ class FeishuChannel(BaseChannel):
return elements or [{"tag": "markdown", "content": content}]
# ── Smart format detection ──────────────────────────────────────────
# Patterns that indicate "complex" markdown needing card rendering
_COMPLEX_MD_RE = re.compile(
r"```" # fenced code block
r"|^\|.+\|.*\n\s*\|[-:\s|]+\|" # markdown table (header + separator)
r"|^#{1,6}\s+" # headings
, re.MULTILINE,
)
# Simple markdown patterns (bold, italic, strikethrough)
_SIMPLE_MD_RE = re.compile(
r"\*\*.+?\*\*" # **bold**
r"|__.+?__" # __bold__
r"|(?<!\*)\*(?!\*)(.+?)(?<!\*)\*(?!\*)" # *italic* (single *)
r"|~~.+?~~" # ~~strikethrough~~
, re.DOTALL,
)
# Markdown link: [text](url)
_MD_LINK_RE = re.compile(r"\[([^\]]+)\]\((https?://[^\)]+)\)")
# Unordered list items
_LIST_RE = re.compile(r"^[\s]*[-*+]\s+", re.MULTILINE)
# Ordered list items
_OLIST_RE = re.compile(r"^[\s]*\d+\.\s+", re.MULTILINE)
# Max length for plain text format
_TEXT_MAX_LEN = 200
# Max length for post (rich text) format; beyond this, use card
_POST_MAX_LEN = 2000
@classmethod
def _detect_msg_format(cls, content: str) -> str:
"""Determine the optimal Feishu message format for *content*.
Returns one of:
- ``"text"`` plain text, short and no markdown
- ``"post"`` rich text (links only, moderate length)
- ``"interactive"`` card with full markdown rendering
"""
stripped = content.strip()
# Complex markdown (code blocks, tables, headings) → always card
if cls._COMPLEX_MD_RE.search(stripped):
return "interactive"
# Long content → card (better readability with card layout)
if len(stripped) > cls._POST_MAX_LEN:
return "interactive"
# Has bold/italic/strikethrough → card (post format can't render these)
if cls._SIMPLE_MD_RE.search(stripped):
return "interactive"
# Has list items → card (post format can't render list bullets well)
if cls._LIST_RE.search(stripped) or cls._OLIST_RE.search(stripped):
return "interactive"
# Has links → post format (supports <a> tags)
if cls._MD_LINK_RE.search(stripped):
return "post"
# Short plain text → text format
if len(stripped) <= cls._TEXT_MAX_LEN:
return "text"
# Medium plain text without any formatting → post format
return "post"
@classmethod
def _markdown_to_post(cls, content: str) -> str:
"""Convert markdown content to Feishu post message JSON.
Handles links ``[text](url)`` as ``a`` tags; everything else as ``text`` tags.
Each line becomes a paragraph (row) in the post body.
"""
lines = content.strip().split("\n")
paragraphs: list[list[dict]] = []
for line in lines:
elements: list[dict] = []
last_end = 0
for m in cls._MD_LINK_RE.finditer(line):
# Text before this link
before = line[last_end:m.start()]
if before:
elements.append({"tag": "text", "text": before})
elements.append({
"tag": "a",
"text": m.group(1),
"href": m.group(2),
})
last_end = m.end()
# Remaining text after last link
remaining = line[last_end:]
if remaining:
elements.append({"tag": "text", "text": remaining})
# Empty line → empty paragraph for spacing
if not elements:
elements.append({"tag": "text", "text": ""})
paragraphs.append(elements)
post_body = {
"zh_cn": {
"content": paragraphs,
}
}
return json.dumps(post_body, ensure_ascii=False)
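For a single line containing one link, the post body built above comes out roughly as follows (illustrative; the real return value is the JSON-encoded string):

# _markdown_to_post("see [docs](https://example.com) for details") ->
# {"zh_cn": {"content": [[
#     {"tag": "text", "text": "see "},
#     {"tag": "a", "text": "docs", "href": "https://example.com"},
#     {"tag": "text", "text": " for details"}
# ]]}}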
_IMAGE_EXTS = {".png", ".jpg", ".jpeg", ".gif", ".bmp", ".webp", ".ico", ".tiff", ".tif"}
_AUDIO_EXTS = {".opus"}
_VIDEO_EXTS = {".mp4", ".mov", ".avi"}
_FILE_TYPE_MAP = {
".opus": "opus", ".mp4": "mp4", ".pdf": "pdf", ".doc": "doc", ".docx": "doc",
".xls": "xls", ".xlsx": "xls", ".ppt": "ppt", ".pptx": "ppt",
@@ -642,18 +798,45 @@ class FeishuChannel(BaseChannel):
else:
key = await loop.run_in_executor(None, self._upload_file_sync, file_path)
if key:
media_type = "audio" if ext in self._AUDIO_EXTS else "file"
# Use msg_type "media" for audio/video so users can play inline;
# "file" for everything else (documents, archives, etc.)
if ext in self._AUDIO_EXTS or ext in self._VIDEO_EXTS:
media_type = "media"
else:
media_type = "file"
await loop.run_in_executor(
None, self._send_message_sync,
receive_id_type, msg.chat_id, media_type, json.dumps({"file_key": key}, ensure_ascii=False),
)
if msg.content and msg.content.strip():
card = {"config": {"wide_screen_mode": True}, "elements": self._build_card_elements(msg.content)}
await loop.run_in_executor(
None, self._send_message_sync,
receive_id_type, msg.chat_id, "interactive", json.dumps(card, ensure_ascii=False),
)
fmt = self._detect_msg_format(msg.content)
if fmt == "text":
# Short plain text → send as simple text message
text_body = json.dumps({"text": msg.content.strip()}, ensure_ascii=False)
await loop.run_in_executor(
None, self._send_message_sync,
receive_id_type, msg.chat_id, "text", text_body,
)
elif fmt == "post":
# Medium content with links → send as rich-text post
post_body = self._markdown_to_post(msg.content)
await loop.run_in_executor(
None, self._send_message_sync,
receive_id_type, msg.chat_id, "post", post_body,
)
else:
# Complex / long content → send as interactive card
elements = self._build_card_elements(msg.content)
for chunk in self._split_elements_by_table_limit(elements):
card = {"config": {"wide_screen_mode": True}, "elements": chunk}
await loop.run_in_executor(
None, self._send_message_sync,
receive_id_type, msg.chat_id, "interactive", json.dumps(card, ensure_ascii=False),
)
except Exception as e:
logger.error("Error sending Feishu message: {}", e)

View File

@@ -14,6 +14,9 @@ from nanobot.bus.events import OutboundMessage
from nanobot.bus.queue import MessageBus
from nanobot.channels.base import BaseChannel
from nanobot.config.schema import TelegramConfig
from nanobot.utils.helpers import split_message
TELEGRAM_MAX_MESSAGE_LEN = 4000 # Telegram message character limit
def _markdown_to_telegram_html(text: str) -> str:
@@ -79,26 +82,6 @@ def _markdown_to_telegram_html(text: str) -> str:
return text
def _split_message(content: str, max_len: int = 4000) -> list[str]:
"""Split content into chunks within max_len, preferring line breaks."""
if len(content) <= max_len:
return [content]
chunks: list[str] = []
while content:
if len(content) <= max_len:
chunks.append(content)
break
cut = content[:max_len]
pos = cut.rfind('\n')
if pos == -1:
pos = cut.rfind(' ')
if pos == -1:
pos = max_len
chunks.append(content[:pos])
content = content[pos:].lstrip()
return chunks
class TelegramChannel(BaseChannel):
"""
Telegram channel using long polling.
@@ -225,7 +208,9 @@ class TelegramChannel(BaseChannel):
logger.warning("Telegram bot not running")
return
self._stop_typing(msg.chat_id)
# Only stop typing indicator for final responses
if not msg.metadata.get("_progress", False):
self._stop_typing(msg.chat_id)
try:
chat_id = int(msg.chat_id)
@@ -269,23 +254,41 @@ class TelegramChannel(BaseChannel):
# Send text content
if msg.content and msg.content != "[empty message]":
for chunk in _split_message(msg.content):
is_progress = msg.metadata.get("_progress", False)
draft_id = msg.metadata.get("message_id")
for chunk in split_message(msg.content, TELEGRAM_MAX_MESSAGE_LEN):
try:
html = _markdown_to_telegram_html(chunk)
await self._app.bot.send_message(
chat_id=chat_id,
text=html,
parse_mode="HTML",
reply_parameters=reply_params
)
if is_progress and draft_id:
await self._app.bot.send_message_draft(
chat_id=chat_id,
draft_id=draft_id,
text=html,
parse_mode="HTML"
)
else:
await self._app.bot.send_message(
chat_id=chat_id,
text=html,
parse_mode="HTML",
reply_parameters=reply_params
)
except Exception as e:
logger.warning("HTML parse failed, falling back to plain text: {}", e)
try:
await self._app.bot.send_message(
chat_id=chat_id,
text=chunk,
reply_parameters=reply_params
)
if is_progress and draft_id:
await self._app.bot.send_message_draft(
chat_id=chat_id,
draft_id=draft_id,
text=chunk
)
else:
await self._app.bot.send_message(
chat_id=chat_id,
text=chunk,
reply_parameters=reply_params
)
except Exception as e2:
logger.error("Error sending Telegram message: {}", e2)

View File

@@ -7,6 +7,18 @@ import signal
import sys
from pathlib import Path
# Force UTF-8 encoding for Windows console
if sys.platform == "win32":
import locale
if sys.stdout.encoding != "utf-8":
os.environ["PYTHONIOENCODING"] = "utf-8"
# Re-open stdout/stderr with UTF-8 encoding
try:
sys.stdout.reconfigure(encoding="utf-8", errors="replace")
sys.stderr.reconfigure(encoding="utf-8", errors="replace")
except Exception:
pass
import typer
from prompt_toolkit import PromptSession
from prompt_toolkit.formatted_text import HTML
@@ -200,8 +212,6 @@ def onboard():
def _make_provider(config: Config):
"""Create the appropriate LLM provider from config."""
from nanobot.providers.custom_provider import CustomProvider
from nanobot.providers.litellm_provider import LiteLLMProvider
from nanobot.providers.openai_codex_provider import OpenAICodexProvider
model = config.agents.defaults.model
@@ -213,6 +223,7 @@ def _make_provider(config: Config):
return OpenAICodexProvider(default_model=model)
# Custom: direct OpenAI-compatible endpoint, bypasses LiteLLM
from nanobot.providers.custom_provider import CustomProvider
if provider_name == "custom":
return CustomProvider(
api_key=p.api_key if p else "no-key",
@@ -220,6 +231,7 @@ def _make_provider(config: Config):
default_model=model,
)
from nanobot.providers.litellm_provider import LiteLLMProvider
from nanobot.providers.registry import find_by_name
spec = find_by_name(provider_name)
if not model.startswith("bedrock/") and not (p and p.api_key) and not (spec and spec.is_oauth):
@@ -244,13 +256,15 @@ def _make_provider(config: Config):
@app.command()
def gateway(
port: int = typer.Option(18790, "--port", "-p", help="Gateway port"),
workspace: str | None = typer.Option(None, "--workspace", "-w", help="Workspace directory"),
config: str | None = typer.Option(None, "--config", "-c", help="Config file path"),
verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"),
):
"""Start the nanobot gateway."""
from nanobot.agent.loop import AgentLoop
from nanobot.bus.queue import MessageBus
from nanobot.channels.manager import ChannelManager
from nanobot.config.loader import get_data_dir, load_config
from nanobot.config.loader import load_config
from nanobot.cron.service import CronService
from nanobot.cron.types import CronJob
from nanobot.heartbeat.service import HeartbeatService
@@ -260,16 +274,20 @@ def gateway(
import logging
logging.basicConfig(level=logging.DEBUG)
console.print(f"{__logo__} Starting nanobot gateway on port {port}...")
config_path = Path(config) if config else None
config = load_config(config_path)
if workspace:
config.agents.defaults.workspace = workspace
config = load_config()
console.print(f"{__logo__} Starting nanobot gateway on port {port}...")
sync_workspace_templates(config.workspace_path)
bus = MessageBus()
provider = _make_provider(config)
session_manager = SessionManager(config.workspace_path)
# Create cron service first (callback set after agent creation)
cron_store_path = get_data_dir() / "cron" / "jobs.json"
# Use workspace path for per-instance cron store
cron_store_path = config.workspace_path / "cron" / "jobs.json"
cron = CronService(cron_store_path)
# Create agent with cron service
@@ -511,12 +529,21 @@ def agent(
else:
cli_channel, cli_chat_id = "cli", session_id
def _exit_on_sigint(signum, frame):
def _handle_signal(signum, frame):
sig_name = signal.Signals(signum).name
_restore_terminal()
console.print("\nGoodbye!")
os._exit(0)
console.print(f"\nReceived {sig_name}, goodbye!")
sys.exit(0)
signal.signal(signal.SIGINT, _exit_on_sigint)
signal.signal(signal.SIGINT, _handle_signal)
signal.signal(signal.SIGTERM, _handle_signal)
# SIGHUP is not available on Windows
if hasattr(signal, 'SIGHUP'):
signal.signal(signal.SIGHUP, _handle_signal)
# Ignore SIGPIPE to prevent silent process termination when writing to closed pipes
# SIGPIPE is not available on Windows
if hasattr(signal, 'SIGPIPE'):
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
async def run_interactive():
bus_task = asyncio.create_task(agent_loop.run())

View File

@@ -29,7 +29,9 @@ class TelegramConfig(Base):
enabled: bool = False
token: str = "" # Bot token from @BotFather
allow_from: list[str] = Field(default_factory=list) # Allowed user IDs or usernames
proxy: str | None = None # HTTP/SOCKS5 proxy URL, e.g. "http://127.0.0.1:7890" or "socks5://127.0.0.1:1080"
proxy: str | None = (
None # HTTP/SOCKS5 proxy URL, e.g. "http://127.0.0.1:7890" or "socks5://127.0.0.1:1080"
)
reply_to_message: bool = False # If true, bot replies quote the original message
@@ -42,7 +44,9 @@ class FeishuConfig(Base):
encrypt_key: str = "" # Encrypt Key for event subscription (optional)
verification_token: str = "" # Verification Token for event subscription (optional)
allow_from: list[str] = Field(default_factory=list) # Allowed user open_ids
react_emoji: str = "THUMBSUP" # Emoji type for message reactions (e.g. THUMBSUP, OK, DONE, SMILE)
react_emoji: str = (
"THUMBSUP" # Emoji type for message reactions (e.g. THUMBSUP, OK, DONE, SMILE)
)
class DingTalkConfig(Base):
@@ -62,6 +66,7 @@ class DiscordConfig(Base):
allow_from: list[str] = Field(default_factory=list) # Allowed user IDs
gateway_url: str = "wss://gateway.discord.gg/?v=10&encoding=json"
intents: int = 37377 # GUILDS + GUILD_MESSAGES + DIRECT_MESSAGES + MESSAGE_CONTENT
group_policy: Literal["mention", "open"] = "mention"
class MatrixConfig(Base):
@@ -72,9 +77,13 @@ class MatrixConfig(Base):
access_token: str = ""
user_id: str = "" # @bot:matrix.org
device_id: str = ""
e2ee_enabled: bool = True # Enable Matrix E2EE support (encryption + encrypted room handling).
sync_stop_grace_seconds: int = 2 # Max seconds to wait for sync_forever to stop gracefully before cancellation fallback.
max_media_bytes: int = 20 * 1024 * 1024 # Max attachment size accepted for Matrix media handling (inbound + outbound).
e2ee_enabled: bool = True # Enable Matrix E2EE support (encryption + encrypted room handling).
sync_stop_grace_seconds: int = (
2 # Max seconds to wait for sync_forever to stop gracefully before cancellation fallback.
)
max_media_bytes: int = (
20 * 1024 * 1024
) # Max attachment size accepted for Matrix media handling (inbound + outbound).
allow_from: list[str] = Field(default_factory=list)
group_policy: Literal["open", "mention", "allowlist"] = "open"
group_allow_from: list[str] = Field(default_factory=list)
@@ -105,7 +114,9 @@ class EmailConfig(Base):
from_address: str = ""
# Behavior
auto_reply_enabled: bool = True # If false, inbound email is read but no automatic reply is sent
auto_reply_enabled: bool = (
True # If false, inbound email is read but no automatic reply is sent
)
poll_interval_seconds: int = 30
mark_seen: bool = True
max_body_chars: int = 12000
@@ -183,27 +194,17 @@ class QQConfig(Base):
enabled: bool = False
app_id: str = "" # 机器人 ID (AppID) from q.qq.com
secret: str = "" # 机器人密钥 (AppSecret) from q.qq.com
allow_from: list[str] = Field(default_factory=list) # Allowed user openids (empty = public access)
allow_from: list[str] = Field(
default_factory=list
) # Allowed user openids (empty = public access)
class MatrixConfig(Base):
"""Matrix (Element) channel configuration."""
enabled: bool = False
homeserver: str = "https://matrix.org"
access_token: str = ""
user_id: str = "" # e.g. @bot:matrix.org
device_id: str = ""
e2ee_enabled: bool = True # end-to-end encryption support
sync_stop_grace_seconds: int = 2 # graceful sync_forever shutdown timeout
max_media_bytes: int = 20 * 1024 * 1024 # inbound + outbound attachment limit
allow_from: list[str] = Field(default_factory=list)
group_policy: Literal["open", "mention", "allowlist"] = "open"
group_allow_from: list[str] = Field(default_factory=list)
allow_room_mentions: bool = False
class ChannelsConfig(Base):
"""Configuration for chat channels."""
send_progress: bool = True # stream agent's text progress to the channel
send_progress: bool = True # stream agent's text progress to the channel
send_tool_hints: bool = False # stream tool-call hints (e.g. read_file("…"))
whatsapp: WhatsAppConfig = Field(default_factory=WhatsAppConfig)
telegram: TelegramConfig = Field(default_factory=TelegramConfig)
@@ -222,7 +223,9 @@ class AgentDefaults(Base):
workspace: str = "~/.nanobot/workspace"
model: str = "anthropic/claude-opus-4-5"
provider: str = "auto" # Provider name (e.g. "anthropic", "openrouter") or "auto" for auto-detection
provider: str = (
"auto" # Provider name (e.g. "anthropic", "openrouter") or "auto" for auto-detection
)
max_tokens: int = 8192
temperature: float = 0.1
max_tool_iterations: int = 40
@@ -260,8 +263,8 @@ class ProvidersConfig(Base):
moonshot: ProviderConfig = Field(default_factory=ProviderConfig)
minimax: ProviderConfig = Field(default_factory=ProviderConfig)
aihubmix: ProviderConfig = Field(default_factory=ProviderConfig) # AiHubMix API gateway
siliconflow: ProviderConfig = Field(default_factory=ProviderConfig) # SiliconFlow (硅基流动) API gateway
volcengine: ProviderConfig = Field(default_factory=ProviderConfig) # VolcEngine (火山引擎) API gateway
siliconflow: ProviderConfig = Field(default_factory=ProviderConfig) # SiliconFlow (硅基流动)
volcengine: ProviderConfig = Field(default_factory=ProviderConfig) # VolcEngine (火山引擎)
openai_codex: ProviderConfig = Field(default_factory=ProviderConfig) # OpenAI Codex (OAuth)
github_copilot: ProviderConfig = Field(default_factory=ProviderConfig) # Github Copilot (OAuth)
@@ -291,7 +294,9 @@ class WebSearchConfig(Base):
class WebToolsConfig(Base):
"""Web tools configuration."""
proxy: str | None = None # HTTP/SOCKS5 proxy URL, e.g. "http://127.0.0.1:7890" or "socks5://127.0.0.1:1080"
proxy: str | None = (
None # HTTP/SOCKS5 proxy URL, e.g. "http://127.0.0.1:7890" or "socks5://127.0.0.1:1080"
)
search: WebSearchConfig = Field(default_factory=WebSearchConfig)
@@ -305,12 +310,13 @@ class ExecToolConfig(Base):
class MCPServerConfig(Base):
"""MCP server connection configuration (stdio or HTTP)."""
type: Literal["stdio", "sse", "streamableHttp"] | None = None # auto-detected if omitted
command: str = "" # Stdio: command to run (e.g. "npx")
args: list[str] = Field(default_factory=list) # Stdio: command arguments
env: dict[str, str] = Field(default_factory=dict) # Stdio: extra env vars
url: str = "" # HTTP: streamable HTTP endpoint URL
headers: dict[str, str] = Field(default_factory=dict) # HTTP: Custom HTTP Headers
tool_timeout: int = 30 # Seconds before a tool call is cancelled
url: str = "" # HTTP/SSE: endpoint URL
headers: dict[str, str] = Field(default_factory=dict) # HTTP/SSE: custom headers
tool_timeout: int = 30 # seconds before a tool call is cancelled
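Illustrative entries covering the three transports; the command and URLs are hypothetical, and the transport noted in the comments is what connect_mcp_servers would infer when type is omitted:

stdio_server = MCPServerConfig(command="npx", args=["-y", "example-mcp-server"])
sse_server = MCPServerConfig(url="https://mcp.example.com/sse")  # inferred as "sse"
http_server = MCPServerConfig(
    url="https://mcp.example.com/mcp",
    headers={"Authorization": "Bearer ..."},
)  # inferred as "streamableHttp"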
class ToolsConfig(Base):
@@ -336,7 +342,9 @@ class Config(BaseSettings):
"""Get expanded workspace path."""
return Path(self.agents.defaults.workspace).expanduser()
def _match_provider(self, model: str | None = None) -> tuple["ProviderConfig | None", str | None]:
def _match_provider(
self, model: str | None = None
) -> tuple["ProviderConfig | None", str | None]:
"""Match provider config and its registry name. Returns (config, spec_name)."""
from nanobot.providers.registry import PROVIDERS

View File

@@ -2,6 +2,7 @@
from __future__ import annotations
import uuid
from typing import Any
import json_repair
@@ -15,7 +16,12 @@ class CustomProvider(LLMProvider):
def __init__(self, api_key: str = "no-key", api_base: str = "http://localhost:8000/v1", default_model: str = "default"):
super().__init__(api_key, api_base)
self.default_model = default_model
self._client = AsyncOpenAI(api_key=api_key, base_url=api_base)
# Keep affinity stable for this provider instance to improve backend cache locality.
self._client = AsyncOpenAI(
api_key=api_key,
base_url=api_base,
default_headers={"x-session-affinity": uuid.uuid4().hex},
)
async def chat(self, messages: list[dict[str, Any]], tools: list[dict[str, Any]] | None = None,
model: str | None = None, max_tokens: int = 4096, temperature: float = 0.7,

View File

@@ -8,6 +8,7 @@ from typing import Any
import json_repair
import litellm
from litellm import acompletion
from loguru import logger
from nanobot.providers.base import LLMProvider, LLMResponse, ToolCallRequest
from nanobot.providers.registry import find_by_model, find_gateway
@@ -255,20 +256,37 @@ class LiteLLMProvider(LLMProvider):
"""Parse LiteLLM response into our standard format."""
choice = response.choices[0]
message = choice.message
content = message.content
finish_reason = choice.finish_reason
# Some providers (e.g. GitHub Copilot) split content and tool_calls
# across multiple choices. Merge them so tool_calls are not lost.
raw_tool_calls = []
for ch in response.choices:
msg = ch.message
if hasattr(msg, "tool_calls") and msg.tool_calls:
raw_tool_calls.extend(msg.tool_calls)
if ch.finish_reason in ("tool_calls", "stop"):
finish_reason = ch.finish_reason
if not content and msg.content:
content = msg.content
if len(response.choices) > 1:
logger.debug("LiteLLM response has {} choices, merged {} tool_calls",
len(response.choices), len(raw_tool_calls))
tool_calls = []
if hasattr(message, "tool_calls") and message.tool_calls:
for tc in message.tool_calls:
# Parse arguments from JSON string if needed
args = tc.function.arguments
if isinstance(args, str):
args = json_repair.loads(args)
for tc in raw_tool_calls:
# Parse arguments from JSON string if needed
args = tc.function.arguments
if isinstance(args, str):
args = json_repair.loads(args)
tool_calls.append(ToolCallRequest(
id=_short_tool_id(),
name=tc.function.name,
arguments=args,
))
tool_calls.append(ToolCallRequest(
id=_short_tool_id(),
name=tc.function.name,
arguments=args,
))
usage = {}
if hasattr(response, "usage") and response.usage:
@@ -280,11 +298,11 @@ class LiteLLMProvider(LLMProvider):
reasoning_content = getattr(message, "reasoning_content", None) or None
thinking_blocks = getattr(message, "thinking_blocks", None) or None
return LLMResponse(
content=message.content,
content=content,
tool_calls=tool_calls,
finish_reason=choice.finish_reason or "stop",
finish_reason=finish_reason or "stop",
usage=usage,
reasoning_content=reasoning_content,
thinking_blocks=thinking_blocks,

View File

@@ -26,33 +26,33 @@ class ProviderSpec:
"""
# identity
name: str # config field name, e.g. "dashscope"
keywords: tuple[str, ...] # model-name keywords for matching (lowercase)
env_key: str # LiteLLM env var, e.g. "DASHSCOPE_API_KEY"
display_name: str = "" # shown in `nanobot status`
name: str # config field name, e.g. "dashscope"
keywords: tuple[str, ...] # model-name keywords for matching (lowercase)
env_key: str # LiteLLM env var, e.g. "DASHSCOPE_API_KEY"
display_name: str = "" # shown in `nanobot status`
# model prefixing
litellm_prefix: str = "" # "dashscope" → model becomes "dashscope/{model}"
skip_prefixes: tuple[str, ...] = () # don't prefix if model already starts with these
litellm_prefix: str = "" # "dashscope" → model becomes "dashscope/{model}"
skip_prefixes: tuple[str, ...] = () # don't prefix if model already starts with these
# extra env vars, e.g. (("ZHIPUAI_API_KEY", "{api_key}"),)
env_extras: tuple[tuple[str, str], ...] = ()
# gateway / local detection
is_gateway: bool = False # routes any model (OpenRouter, AiHubMix)
is_local: bool = False # local deployment (vLLM, Ollama)
detect_by_key_prefix: str = "" # match api_key prefix, e.g. "sk-or-"
detect_by_base_keyword: str = "" # match substring in api_base URL
default_api_base: str = "" # fallback base URL
is_gateway: bool = False # routes any model (OpenRouter, AiHubMix)
is_local: bool = False # local deployment (vLLM, Ollama)
detect_by_key_prefix: str = "" # match api_key prefix, e.g. "sk-or-"
detect_by_base_keyword: str = "" # match substring in api_base URL
default_api_base: str = "" # fallback base URL
# gateway behavior
strip_model_prefix: bool = False # strip "provider/" before re-prefixing
strip_model_prefix: bool = False # strip "provider/" before re-prefixing
# per-model param overrides, e.g. (("kimi-k2.5", {"temperature": 1.0}),)
model_overrides: tuple[tuple[str, dict[str, Any]], ...] = ()
# OAuth-based providers (e.g., OpenAI Codex) don't use API keys
is_oauth: bool = False # if True, uses OAuth flow instead of API key
is_oauth: bool = False # if True, uses OAuth flow instead of API key
# Direct providers bypass LiteLLM entirely (e.g., CustomProvider)
is_direct: bool = False
@@ -70,7 +70,6 @@ class ProviderSpec:
# ---------------------------------------------------------------------------
PROVIDERS: tuple[ProviderSpec, ...] = (
# === Custom (direct OpenAI-compatible endpoint, bypasses LiteLLM) ======
ProviderSpec(
name="custom",
@@ -80,17 +79,15 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
litellm_prefix="",
is_direct=True,
),
# === Gateways (detected by api_key / api_base, not model name) =========
# Gateways can route any model, so they win in fallback.
# OpenRouter: global gateway, keys start with "sk-or-"
ProviderSpec(
name="openrouter",
keywords=("openrouter",),
env_key="OPENROUTER_API_KEY",
display_name="OpenRouter",
litellm_prefix="openrouter", # claude-3 → openrouter/claude-3
litellm_prefix="openrouter", # claude-3 → openrouter/claude-3
skip_prefixes=(),
env_extras=(),
is_gateway=True,
@@ -102,16 +99,15 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
model_overrides=(),
supports_prompt_caching=True,
),
# AiHubMix: global gateway, OpenAI-compatible interface.
# strip_model_prefix=True: it doesn't understand "anthropic/claude-3",
# so we strip to bare "claude-3" then re-prefix as "openai/claude-3".
ProviderSpec(
name="aihubmix",
keywords=("aihubmix",),
env_key="OPENAI_API_KEY", # OpenAI-compatible
env_key="OPENAI_API_KEY", # OpenAI-compatible
display_name="AiHubMix",
litellm_prefix="openai", # → openai/{model}
litellm_prefix="openai", # → openai/{model}
skip_prefixes=(),
env_extras=(),
is_gateway=True,
@@ -119,10 +115,9 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
detect_by_key_prefix="",
detect_by_base_keyword="aihubmix",
default_api_base="https://aihubmix.com/v1",
strip_model_prefix=True, # anthropic/claude-3 → claude-3 → openai/claude-3
strip_model_prefix=True, # anthropic/claude-3 → claude-3 → openai/claude-3
model_overrides=(),
),
# SiliconFlow (硅基流动): OpenAI-compatible gateway, model names keep org prefix
ProviderSpec(
name="siliconflow",
@@ -140,7 +135,6 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
strip_model_prefix=False,
model_overrides=(),
),
# VolcEngine (火山引擎): OpenAI-compatible gateway
ProviderSpec(
name="volcengine",
@@ -158,9 +152,7 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
strip_model_prefix=False,
model_overrides=(),
),
# === Standard providers (matched by model-name keywords) ===============
# Anthropic: LiteLLM recognizes "claude-*" natively, no prefix needed.
ProviderSpec(
name="anthropic",
@@ -179,7 +171,6 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
model_overrides=(),
supports_prompt_caching=True,
),
# OpenAI: LiteLLM recognizes "gpt-*" natively, no prefix needed.
ProviderSpec(
name="openai",
@@ -197,14 +188,13 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
strip_model_prefix=False,
model_overrides=(),
),
# OpenAI Codex: uses OAuth, not API key.
ProviderSpec(
name="openai_codex",
keywords=("openai-codex",),
env_key="", # OAuth-based, no API key
env_key="", # OAuth-based, no API key
display_name="OpenAI Codex",
litellm_prefix="", # Not routed through LiteLLM
litellm_prefix="", # Not routed through LiteLLM
skip_prefixes=(),
env_extras=(),
is_gateway=False,
@@ -214,16 +204,15 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
default_api_base="https://chatgpt.com/backend-api",
strip_model_prefix=False,
model_overrides=(),
is_oauth=True, # OAuth-based authentication
is_oauth=True, # OAuth-based authentication
),
# Github Copilot: uses OAuth, not API key.
ProviderSpec(
name="github_copilot",
keywords=("github_copilot", "copilot"),
env_key="", # OAuth-based, no API key
env_key="", # OAuth-based, no API key
display_name="Github Copilot",
litellm_prefix="github_copilot", # github_copilot/model → github_copilot/model
litellm_prefix="github_copilot", # github_copilot/model → github_copilot/model
skip_prefixes=("github_copilot/",),
env_extras=(),
is_gateway=False,
@@ -233,17 +222,16 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
default_api_base="",
strip_model_prefix=False,
model_overrides=(),
is_oauth=True, # OAuth-based authentication
is_oauth=True, # OAuth-based authentication
),
# DeepSeek: needs "deepseek/" prefix for LiteLLM routing.
ProviderSpec(
name="deepseek",
keywords=("deepseek",),
env_key="DEEPSEEK_API_KEY",
display_name="DeepSeek",
litellm_prefix="deepseek", # deepseek-chat → deepseek/deepseek-chat
skip_prefixes=("deepseek/",), # avoid double-prefix
litellm_prefix="deepseek", # deepseek-chat → deepseek/deepseek-chat
skip_prefixes=("deepseek/",), # avoid double-prefix
env_extras=(),
is_gateway=False,
is_local=False,
@@ -253,15 +241,14 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
strip_model_prefix=False,
model_overrides=(),
),
# Gemini: needs "gemini/" prefix for LiteLLM.
ProviderSpec(
name="gemini",
keywords=("gemini",),
env_key="GEMINI_API_KEY",
display_name="Gemini",
litellm_prefix="gemini", # gemini-pro → gemini/gemini-pro
skip_prefixes=("gemini/",), # avoid double-prefix
litellm_prefix="gemini", # gemini-pro → gemini/gemini-pro
skip_prefixes=("gemini/",), # avoid double-prefix
env_extras=(),
is_gateway=False,
is_local=False,
@@ -271,7 +258,6 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
strip_model_prefix=False,
model_overrides=(),
),
# Zhipu: LiteLLM uses "zai/" prefix.
# Also mirrors key to ZHIPUAI_API_KEY (some LiteLLM paths check that).
# skip_prefixes: don't add "zai/" when already routed via gateway.
@@ -280,11 +266,9 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
keywords=("zhipu", "glm", "zai"),
env_key="ZAI_API_KEY",
display_name="Zhipu AI",
litellm_prefix="zai", # glm-4 → zai/glm-4
litellm_prefix="zai", # glm-4 → zai/glm-4
skip_prefixes=("zhipu/", "zai/", "openrouter/", "hosted_vllm/"),
env_extras=(
("ZHIPUAI_API_KEY", "{api_key}"),
),
env_extras=(("ZHIPUAI_API_KEY", "{api_key}"),),
is_gateway=False,
is_local=False,
detect_by_key_prefix="",
@@ -293,14 +277,13 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
strip_model_prefix=False,
model_overrides=(),
),
# DashScope: Qwen models, needs "dashscope/" prefix.
ProviderSpec(
name="dashscope",
keywords=("qwen", "dashscope"),
env_key="DASHSCOPE_API_KEY",
display_name="DashScope",
litellm_prefix="dashscope", # qwen-max → dashscope/qwen-max
litellm_prefix="dashscope", # qwen-max → dashscope/qwen-max
skip_prefixes=("dashscope/", "openrouter/"),
env_extras=(),
is_gateway=False,
@@ -311,7 +294,6 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
strip_model_prefix=False,
model_overrides=(),
),
# Moonshot: Kimi models, needs "moonshot/" prefix.
# LiteLLM requires MOONSHOT_API_BASE env var to find the endpoint.
# Kimi K2.5 API enforces temperature >= 1.0.
@@ -320,22 +302,17 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
keywords=("moonshot", "kimi"),
env_key="MOONSHOT_API_KEY",
display_name="Moonshot",
litellm_prefix="moonshot", # kimi-k2.5 → moonshot/kimi-k2.5
litellm_prefix="moonshot", # kimi-k2.5 → moonshot/kimi-k2.5
skip_prefixes=("moonshot/", "openrouter/"),
env_extras=(
("MOONSHOT_API_BASE", "{api_base}"),
),
env_extras=(("MOONSHOT_API_BASE", "{api_base}"),),
is_gateway=False,
is_local=False,
detect_by_key_prefix="",
detect_by_base_keyword="",
default_api_base="https://api.moonshot.ai/v1", # intl; use api.moonshot.cn for China
default_api_base="https://api.moonshot.ai/v1", # intl; use api.moonshot.cn for China
strip_model_prefix=False,
model_overrides=(
("kimi-k2.5", {"temperature": 1.0}),
),
model_overrides=(("kimi-k2.5", {"temperature": 1.0}),),
),
# MiniMax: needs "minimax/" prefix for LiteLLM routing.
# Uses OpenAI-compatible API at api.minimax.io/v1.
ProviderSpec(
@@ -343,7 +320,7 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
keywords=("minimax",),
env_key="MINIMAX_API_KEY",
display_name="MiniMax",
litellm_prefix="minimax", # MiniMax-M2.1 → minimax/MiniMax-M2.1
litellm_prefix="minimax", # MiniMax-M2.1 → minimax/MiniMax-M2.1
skip_prefixes=("minimax/", "openrouter/"),
env_extras=(),
is_gateway=False,
@@ -354,9 +331,7 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
strip_model_prefix=False,
model_overrides=(),
),
# === Local deployment (matched by config key, NOT by api_base) =========
# vLLM / any OpenAI-compatible local server.
# Detected when config key is "vllm" (provider_name="vllm").
ProviderSpec(
@@ -364,20 +339,18 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
keywords=("vllm",),
env_key="HOSTED_VLLM_API_KEY",
display_name="vLLM/Local",
litellm_prefix="hosted_vllm", # Llama-3-8B → hosted_vllm/Llama-3-8B
litellm_prefix="hosted_vllm", # Llama-3-8B → hosted_vllm/Llama-3-8B
skip_prefixes=(),
env_extras=(),
is_gateway=False,
is_local=True,
detect_by_key_prefix="",
detect_by_base_keyword="",
default_api_base="", # user must provide in config
default_api_base="", # user must provide in config
strip_model_prefix=False,
model_overrides=(),
),
# === Auxiliary (not a primary LLM provider) ============================
# Groq: mainly used for Whisper voice transcription, also usable for LLM.
# Needs "groq/" prefix for LiteLLM routing. Placed last — it rarely wins fallback.
ProviderSpec(
@@ -385,8 +358,8 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
keywords=("groq",),
env_key="GROQ_API_KEY",
display_name="Groq",
litellm_prefix="groq", # llama3-8b-8192 → groq/llama3-8b-8192
skip_prefixes=("groq/",), # avoid double-prefix
litellm_prefix="groq", # llama3-8b-8192 → groq/llama3-8b-8192
skip_prefixes=("groq/",), # avoid double-prefix
env_extras=(),
is_gateway=False,
is_local=False,
@@ -403,6 +376,7 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
# Lookup helpers
# ---------------------------------------------------------------------------
def find_by_model(model: str) -> ProviderSpec | None:
"""Match a standard provider by model-name keyword (case-insensitive).
Skips gateways/local — those are matched by api_key/api_base instead."""
@@ -418,7 +392,9 @@ def find_by_model(model: str) -> ProviderSpec | None:
return spec
for spec in std_specs:
if any(kw in model_lower or kw.replace("-", "_") in model_normalized for kw in spec.keywords):
if any(
kw in model_lower or kw.replace("-", "_") in model_normalized for kw in spec.keywords
):
return spec
return None

View File

@@ -5,6 +5,19 @@ from datetime import datetime
from pathlib import Path
def detect_image_mime(data: bytes) -> str | None:
"""Detect image MIME type from magic bytes, ignoring file extension."""
if data[:8] == b"\x89PNG\r\n\x1a\n":
return "image/png"
if data[:3] == b"\xff\xd8\xff":
return "image/jpeg"
if data[:6] in (b"GIF87a", b"GIF89a"):
return "image/gif"
if data[:4] == b"RIFF" and data[8:12] == b"WEBP":
return "image/webp"
return None
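Usage mirrors the context-builder change earlier in this diff: sniff the bytes first, then fall back to the filename guess (sketch; the path is hypothetical):

import mimetypes
from pathlib import Path

raw = Path("photo.bin").read_bytes()
mime = detect_image_mime(raw) or mimetypes.guess_type("photo.bin")[0]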
def ensure_dir(path: Path) -> Path:
"""Ensure directory exists, return it."""
path.mkdir(parents=True, exist_ok=True)
@@ -34,6 +47,38 @@ def safe_filename(name: str) -> str:
return _UNSAFE_CHARS.sub("_", name).strip()
def split_message(content: str, max_len: int = 2000) -> list[str]:
"""
Split content into chunks within max_len, preferring line breaks.
Args:
content: The text content to split.
max_len: Maximum length per chunk (default 2000 for Discord compatibility).
Returns:
List of message chunks, each within max_len.
"""
if not content:
return []
if len(content) <= max_len:
return [content]
chunks: list[str] = []
while content:
if len(content) <= max_len:
chunks.append(content)
break
cut = content[:max_len]
# Try to break at newline first, then space, then hard break
pos = cut.rfind('\n')
if pos <= 0:
pos = cut.rfind(' ')
if pos <= 0:
pos = max_len
chunks.append(content[:pos])
content = content[pos:].lstrip()
return chunks
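Both Discord and Telegram now call this shared helper with their own limits (illustrative; long_reply stands in for any outbound text):

chunks = split_message(long_reply, 2000)                      # Discord: MAX_MESSAGE_LEN
chunks = split_message(long_reply, TELEGRAM_MAX_MESSAGE_LEN)  # Telegram: 4000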
def sync_workspace_templates(workspace: Path, silent: bool = False) -> list[str]:
"""Sync bundled templates to workspace. Only creates missing files."""
from importlib.resources import files as pkg_files