Merge branch 'main' into pr-1900

Re-bin
2026-03-12 06:06:26 +00:00
14 changed files with 172 additions and 16 deletions

View File

@@ -4,7 +4,9 @@ from __future__ import annotations
 import asyncio
 import json
+import os
 import re
+import sys
 from contextlib import AsyncExitStack
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, Awaitable, Callable
@@ -43,7 +45,7 @@ class AgentLoop:
     5. Sends responses back
     """

-    _TOOL_RESULT_MAX_CHARS = 500
+    _TOOL_RESULT_MAX_CHARS = 16_000

    def __init__(
        self,
@@ -256,8 +258,11 @@ class AgentLoop:
             except asyncio.TimeoutError:
                 continue

-            if msg.content.strip().lower() == "/stop":
+            cmd = msg.content.strip().lower()
+            if cmd == "/stop":
                 await self._handle_stop(msg)
+            elif cmd == "/restart":
+                await self._handle_restart(msg)
             else:
                 task = asyncio.create_task(self._dispatch(msg))
                 self._active_tasks.setdefault(msg.session_key, []).append(task)
@@ -274,11 +279,23 @@ class AgentLoop:
                 pass
         sub_cancelled = await self.subagents.cancel_by_session(msg.session_key)
         total = cancelled + sub_cancelled
         content = f"Stopped {total} task(s)." if total else "No active task to stop."
         await self.bus.publish_outbound(OutboundMessage(
             channel=msg.channel, chat_id=msg.chat_id, content=content,
         ))

+    async def _handle_restart(self, msg: InboundMessage) -> None:
+        """Restart the process in-place via os.execv."""
+        await self.bus.publish_outbound(OutboundMessage(
+            channel=msg.channel, chat_id=msg.chat_id, content="Restarting...",
+        ))
+
+        async def _do_restart():
+            await asyncio.sleep(1)
+            os.execv(sys.executable, [sys.executable] + sys.argv)
+
+        asyncio.create_task(_do_restart())
+
     async def _dispatch(self, msg: InboundMessage) -> None:
         """Process a message under the global lock."""
         async with self._processing_lock:
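The new `/restart` handler defers `os.execv` by a second so the "Restarting..." message can reach the channel before the process image is replaced. A minimal standalone sketch of the same exec-in-place pattern (the helper name is illustrative, not the repo's):

```python
import asyncio
import os
import sys

async def restart_in_place(delay: float = 1.0) -> None:
    """Re-exec the current Python program in place (hypothetical helper)."""
    # Give pending I/O (e.g. the outbound "Restarting..." message) time to flush.
    await asyncio.sleep(delay)
    # os.execv replaces the current process image: the PID is kept, no code
    # after this call runs, and the program restarts with the same argv.
    os.execv(sys.executable, [sys.executable] + sys.argv)
```

One caveat of this pattern: because the delay runs in a fire-and-forget task, anything not flushed before exec replaces the process is lost.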
@@ -373,9 +390,16 @@ class AgentLoop:
             return OutboundMessage(channel=msg.channel, chat_id=msg.chat_id,
                                    content="New session started.")
         if cmd == "/help":
-            return OutboundMessage(channel=msg.channel, chat_id=msg.chat_id,
-                                   content="🐈 nanobot commands:\n/new — Start a new conversation\n/stop — Stop the current task\n/help — Show available commands")
+            lines = [
+                "🐈 nanobot commands:",
+                "/new — Start a new conversation",
+                "/stop — Stop the current task",
+                "/restart — Restart the bot",
+                "/help — Show available commands",
+            ]
+            return OutboundMessage(
+                channel=msg.channel, chat_id=msg.chat_id, content="\n".join(lines),
+            )

         await self.memory_consolidator.maybe_consolidate_by_tokens(session)
         self._set_tool_context(msg.channel, msg.chat_id, msg.metadata.get("message_id"))

View File

@@ -120,6 +120,7 @@ class MemoryStore:
             ],
             tools=_SAVE_MEMORY_TOOL,
             model=model,
+            tool_choice="required",
         )

         if not response.has_tool_calls:
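Passing `tool_choice="required"` forces the model to emit a tool call rather than a prose reply, so the `if not response.has_tool_calls:` branch now only fires on genuine failures. In OpenAI-compatible chat APIs the parameter accepts roughly these forms (a sketch; the forced-tool name below is hypothetical):

```python
# String forms accepted by OpenAI-compatible chat completion APIs:
tool_choice = "auto"      # model decides whether to call a tool (previous default here)
tool_choice = "required"  # model must call at least one tool
tool_choice = "none"      # model must answer in plain text

# Dict form: force one specific tool by name (name here is illustrative).
tool_choice = {"type": "function", "function": {"name": "save_memory"}}
```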

View File

@@ -352,6 +352,27 @@ class FeishuChannel(BaseChannel):
         self._running = False
         logger.info("Feishu bot stopped")

+    def _is_bot_mentioned(self, message: Any) -> bool:
+        """Check if the bot is @mentioned in the message."""
+        raw_content = message.content or ""
+        if "@_all" in raw_content:
+            return True
+        for mention in getattr(message, "mentions", None) or []:
+            mid = getattr(mention, "id", None)
+            if not mid:
+                continue
+            # Bot mentions have no user_id (None or "") but a valid open_id
+            if not getattr(mid, "user_id", None) and (getattr(mid, "open_id", None) or "").startswith("ou_"):
+                return True
+        return False
+
+    def _is_group_message_for_bot(self, message: Any) -> bool:
+        """Allow group messages when policy is open or bot is @mentioned."""
+        if self.config.group_policy == "open":
+            return True
+        return self._is_bot_mentioned(message)
+
     def _add_reaction_sync(self, message_id: str, emoji_type: str) -> None:
         """Sync helper for adding reaction (runs in thread pool)."""
         from lark_oapi.api.im.v1 import CreateMessageReactionRequest, CreateMessageReactionRequestBody, Emoji
@@ -893,6 +914,10 @@ class FeishuChannel(BaseChannel):
         chat_type = message.chat_type
         msg_type = message.message_type

+        if chat_type == "group" and not self._is_group_message_for_bot(message):
+            logger.debug("Feishu: skipping group message (not mentioned)")
+            return
+
         # Add reaction
         await self._add_reaction(message_id, self.config.react_emoji)
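The mention check relies on a shape difference in Feishu mention entries: a mention of the bot carries an `ou_`-prefixed open_id but no user_id, while a mention of a human carries both. A self-contained sketch of that heuristic, using stand-in objects instead of the lark_oapi SDK types (attribute shapes assumed from the code above):

```python
from types import SimpleNamespace

def looks_like_bot_mention(mention) -> bool:
    mid = getattr(mention, "id", None)
    if not mid:
        return False
    # Bot mentions have no user_id (None or "") but a valid "ou_" open_id.
    has_open_id = (getattr(mid, "open_id", None) or "").startswith("ou_")
    return not getattr(mid, "user_id", None) and has_open_id

bot = SimpleNamespace(id=SimpleNamespace(user_id=None, open_id="ou_abc123"))
human = SimpleNamespace(id=SimpleNamespace(user_id="u_42", open_id="ou_def456"))
assert looks_like_bot_mention(bot)
assert not looks_like_bot_mention(human)
```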

View File

@@ -164,6 +164,7 @@ class TelegramChannel(BaseChannel):
         BotCommand("new", "Start a new conversation"),
         BotCommand("stop", "Stop the current task"),
         BotCommand("help", "Show available commands"),
+        BotCommand("restart", "Restart the bot"),
     ]

     def __init__(self, config: TelegramConfig, bus: MessageBus):
def __init__(self, config: TelegramConfig, bus: MessageBus):
@@ -221,6 +222,7 @@ class TelegramChannel(BaseChannel):
         self._app.add_handler(CommandHandler("start", self._on_start))
         self._app.add_handler(CommandHandler("new", self._forward_command))
         self._app.add_handler(CommandHandler("stop", self._forward_command))
+        self._app.add_handler(CommandHandler("restart", self._forward_command))
         self._app.add_handler(CommandHandler("help", self._on_help))

         # Add message handler for text, photos, voice, documents
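A new Telegram command touches two places: the `BotCommand` list, which only populates the client-side "/" menu, and a `CommandHandler`, which actually routes the update. A reduced sketch of the pairing with python-telegram-bot (token and handler body are placeholders; how the command list is applied here is assumed, presumably via `bot.set_my_commands`):

```python
from telegram import BotCommand
from telegram.ext import ApplicationBuilder, CommandHandler

async def forward_command(update, context):
    ...  # republish the command as an inbound message on the bus

app = ApplicationBuilder().token("YOUR_TOKEN").build()
# Routes /restart updates to the handler; without this the menu entry is inert.
app.add_handler(CommandHandler("restart", forward_command))
# Populates the "/" autocomplete menu; without this the command still works
# but is not discoverable in clients.
commands = [BotCommand("restart", "Restart the bot")]
```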

View File

@@ -48,6 +48,7 @@ class FeishuConfig(Base):
     react_emoji: str = (
         "THUMBSUP"  # Emoji type for message reactions (e.g. THUMBSUP, OK, DONE, SMILE)
     )
+    group_policy: Literal["open", "mention"] = "mention"  # "mention" responds when @mentioned, "open" responds to all

 class DingTalkConfig(Base):
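`Literal["open", "mention"]` narrows the field to exactly two strings, so a typo in the config fails at load time instead of silently falling through the policy check. Assuming `Base` is a pydantic model, the field behaves like this in isolation:

```python
from typing import Literal

from pydantic import BaseModel, ValidationError

class FeishuConfigSketch(BaseModel):  # stand-in for the repo's FeishuConfig
    group_policy: Literal["open", "mention"] = "mention"

FeishuConfigSketch()                     # defaults to "mention"
FeishuConfigSketch(group_policy="open")  # ok
try:
    FeishuConfigSketch(group_policy="all")
except ValidationError:
    pass  # rejected at load time rather than misbehaving at runtime
```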

View File

@@ -88,6 +88,7 @@ class AzureOpenAIProvider(LLMProvider):
         max_tokens: int = 4096,
         temperature: float = 0.7,
         reasoning_effort: str | None = None,
+        tool_choice: str | dict[str, Any] | None = None,
     ) -> dict[str, Any]:
         """Prepare the request payload with Azure OpenAI 2024-10-21 compliance."""
         payload: dict[str, Any] = {
@@ -106,7 +107,7 @@ class AzureOpenAIProvider(LLMProvider):

         if tools:
             payload["tools"] = tools
-            payload["tool_choice"] = "auto"
+            payload["tool_choice"] = tool_choice or "auto"

         return payload
@@ -118,6 +119,7 @@ class AzureOpenAIProvider(LLMProvider):
         max_tokens: int = 4096,
         temperature: float = 0.7,
         reasoning_effort: str | None = None,
+        tool_choice: str | dict[str, Any] | None = None,
     ) -> LLMResponse:
         """
         Send a chat completion request to Azure OpenAI.
@@ -137,7 +139,8 @@ class AzureOpenAIProvider(LLMProvider):
         url = self._build_chat_url(deployment_name)
         headers = self._build_headers()
         payload = self._prepare_request_payload(
-            deployment_name, messages, tools, max_tokens, temperature, reasoning_effort
+            deployment_name, messages, tools, max_tokens, temperature, reasoning_effort,
+            tool_choice=tool_choice,
         )

         try:
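`tool_choice or "auto"` preserves the old default for every caller that omits the new parameter, since `None` is falsy. The one subtlety is that any falsy value, not just `None`, falls back, as this small illustration of the expression shows:

```python
def effective_tool_choice(tool_choice):
    # Mirrors `payload["tool_choice"] = tool_choice or "auto"`.
    return tool_choice or "auto"

assert effective_tool_choice(None) == "auto"            # parameter omitted
assert effective_tool_choice("required") == "required"  # explicit value wins
assert effective_tool_choice("") == "auto"              # falsy string also falls back
forced = {"type": "function", "function": {"name": "f"}}
assert effective_tool_choice(forced) is forced          # dict form passes through
```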

View File

@@ -166,6 +166,7 @@ class LLMProvider(ABC):
         max_tokens: int = 4096,
         temperature: float = 0.7,
         reasoning_effort: str | None = None,
+        tool_choice: str | dict[str, Any] | None = None,
     ) -> LLMResponse:
         """
         Send a chat completion request.
@@ -176,6 +177,7 @@ class LLMProvider(ABC):
             model: Model identifier (provider-specific).
             max_tokens: Maximum tokens in response.
             temperature: Sampling temperature.
+            tool_choice: Tool selection strategy ("auto", "required", or a specific tool dict).

         Returns:
             LLMResponse with content and/or tool calls.
@@ -195,6 +197,7 @@ class LLMProvider(ABC):
         max_tokens: object = _SENTINEL,
         temperature: object = _SENTINEL,
         reasoning_effort: object = _SENTINEL,
+        tool_choice: str | dict[str, Any] | None = None,
     ) -> LLMResponse:
         """Call chat() with retry on transient provider failures.
@@ -218,6 +221,7 @@ class LLMProvider(ABC):
                     max_tokens=max_tokens,
                     temperature=temperature,
                     reasoning_effort=reasoning_effort,
+                    tool_choice=tool_choice,
                 )
             except asyncio.CancelledError:
                 raise
@@ -250,6 +254,7 @@ class LLMProvider(ABC):
                 max_tokens=max_tokens,
                 temperature=temperature,
                 reasoning_effort=reasoning_effort,
+                tool_choice=tool_choice,
             )
         except asyncio.CancelledError:
             raise
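`chat_with_retry` threads `tool_choice` through both the initial attempt and each retry, so a forced tool call stays forced across transient failures, while cancellation is still re-raised rather than retried. A reduced sketch of that retry shape (names, retry count, and backoff are assumptions, not the repo's values):

```python
import asyncio
from typing import Any, Awaitable, Callable

async def chat_with_retry_sketch(
    attempt_fn: Callable[..., Awaitable[Any]],
    *,
    tool_choice: Any = None,
    retries: int = 2,
) -> Any:
    for attempt in range(retries + 1):
        try:
            # tool_choice is forwarded on every attempt, retries included.
            return await attempt_fn(tool_choice=tool_choice)
        except asyncio.CancelledError:
            raise  # never swallow cancellation
        except Exception:
            if attempt == retries:
                raise
            await asyncio.sleep(2 ** attempt)  # assumed exponential backoff
```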

View File

@@ -25,7 +25,8 @@ class CustomProvider(LLMProvider):

     async def chat(self, messages: list[dict[str, Any]], tools: list[dict[str, Any]] | None = None,
                    model: str | None = None, max_tokens: int = 4096, temperature: float = 0.7,
-                   reasoning_effort: str | None = None) -> LLMResponse:
+                   reasoning_effort: str | None = None,
+                   tool_choice: str | dict[str, Any] | None = None) -> LLMResponse:
         kwargs: dict[str, Any] = {
             "model": model or self.default_model,
             "messages": self._sanitize_empty_content(messages),
@@ -35,7 +36,7 @@ class CustomProvider(LLMProvider):

         if reasoning_effort:
             kwargs["reasoning_effort"] = reasoning_effort
         if tools:
-            kwargs.update(tools=tools, tool_choice="auto")
+            kwargs.update(tools=tools, tool_choice=tool_choice or "auto")

         try:
             return self._parse(await self._client.chat.completions.create(**kwargs))
         except Exception as e:

View File

@@ -214,6 +214,7 @@ class LiteLLMProvider(LLMProvider):
         max_tokens: int = 4096,
         temperature: float = 0.7,
         reasoning_effort: str | None = None,
+        tool_choice: str | dict[str, Any] | None = None,
     ) -> LLMResponse:
         """
         Send a chat completion request via LiteLLM.
@@ -267,7 +268,7 @@ class LiteLLMProvider(LLMProvider):

         if tools:
             kwargs["tools"] = tools
-            kwargs["tool_choice"] = "auto"
+            kwargs["tool_choice"] = tool_choice or "auto"

         try:
             response = await acompletion(**kwargs)

View File

@@ -32,6 +32,7 @@ class OpenAICodexProvider(LLMProvider):
         max_tokens: int = 4096,
         temperature: float = 0.7,
         reasoning_effort: str | None = None,
+        tool_choice: str | dict[str, Any] | None = None,
     ) -> LLMResponse:
         model = model or self.default_model
         system_prompt, input_items = _convert_messages(messages)
@@ -48,7 +49,7 @@ class OpenAICodexProvider(LLMProvider):
             "text": {"verbosity": "medium"},
             "include": ["reasoning.encrypted_content"],
             "prompt_cache_key": _prompt_cache_key(messages),
-            "tool_choice": "auto",
+            "tool_choice": tool_choice or "auto",
             "parallel_tool_calls": True,
         }