Merge remote-tracking branch 'origin/main' into pr-1294

This commit is contained in:
Re-bin
2026-02-28 08:01:04 +00:00
6 changed files with 23 additions and 5 deletions

View File

@@ -420,7 +420,7 @@ Uses **WebSocket** long connection — no public IP required.
**1. Create a Feishu bot**
- Visit [Feishu Open Platform](https://open.feishu.cn/app)
- Create a new app → Enable **Bot** capability
- **Permissions**: Add `im:message` (send messages) and `im:message.p2p_msg:readonly` (receive messages)
- **Events**: Add `im.message.receive_v1` (receive messages)
- Select **Long Connection** mode (requires running nanobot first to establish connection)
- Get **App ID** and **App Secret** from "Credentials & Basic Info"

View File

@@ -227,6 +227,12 @@ class AgentLoop:
)
else:
clean = self._strip_think(response.content)
# Don't persist error responses to session history — they can
# poison the context and cause permanent 400 loops (#1303).
if response.finish_reason == "error":
logger.error("LLM returned error: {}", (clean or "")[:200])
final_content = clean or "Sorry, I encountered an error calling the AI model."
break
messages = self.context.add_assistant_message(
messages, clean, reasoning_content=response.reasoning_content,
)
@@ -448,6 +454,8 @@ class AgentLoop:
for m in messages[skip:]:
entry = {k: v for k, v in m.items() if k != "reasoning_content"}
role, content = entry.get("role"), entry.get("content")
if role == "assistant" and not content and not entry.get("tool_calls"):
continue # skip empty assistant messages — they poison session context
if role == "tool" and isinstance(content, str) and len(content) > self._TOOL_RESULT_MAX_CHARS:
entry["content"] = content[:self._TOOL_RESULT_MAX_CHARS] + "\n... (truncated)"
elif role == "user":

View File

@@ -89,8 +89,9 @@ def _extract_interactive_content(content: dict) -> list[str]:
elif isinstance(title, str):
parts.append(f"title: {title}")
for elements in content.get("elements", []) if isinstance(content.get("elements"), list) else []:
for element in elements:
parts.extend(_extract_element_content(element))
card = content.get("card", {})
if card:

View File

@@ -100,10 +100,12 @@ class QQChannel(BaseChannel):
logger.warning("QQ client not initialized")
return
try:
msg_id = msg.metadata.get("message_id")
await self._client.api.post_c2c_message(
openid=msg.chat_id,
msg_type=0,
content=msg.content,
msg_id=msg_id,
)
except Exception as e:
logger.error("Error sending QQ message: {}", e)

View File

@@ -3,6 +3,8 @@
import json
import json_repair
import os
import secrets
import string
from typing import Any
import litellm
@@ -15,6 +17,11 @@ from nanobot.providers.registry import find_by_model, find_gateway
# Standard OpenAI chat-completion message keys plus reasoning_content for
# thinking-enabled models (Kimi k2.5, DeepSeek-R1, etc.).
_ALLOWED_MSG_KEYS = frozenset({"role", "content", "tool_calls", "tool_call_id", "name", "reasoning_content"})
_ALNUM = string.ascii_letters + string.digits
def _short_tool_id() -> str:
"""Generate a 9-char alphanumeric ID compatible with all providers (incl. Mistral)."""
return "".join(secrets.choice(_ALNUM) for _ in range(9))
class LiteLLMProvider(LLMProvider):
@@ -245,7 +252,7 @@ class LiteLLMProvider(LLMProvider):
args = json_repair.loads(args)
tool_calls.append(ToolCallRequest(
id=_short_tool_id(),
name=tc.function.name,
arguments=args,
))

View File

@@ -201,7 +201,7 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
# OpenAI Codex: uses OAuth, not API key.
ProviderSpec(
name="openai_codex",
keywords=("openai-codex",),
env_key="",  # OAuth-based, no API key
display_name="OpenAI Codex",
litellm_prefix="",  # Not routed through LiteLLM