fix: sanitize messages and ensure 'content' for strict LLM providers
- Strip non-standard keys like 'reasoning_content' before sending to the LLM
- Always include a 'content' key in assistant messages (required by StepFun)
- Add _sanitize_messages to LiteLLMProvider to prevent 400 BadRequest errors
This commit is contained in:
@@ -227,9 +227,9 @@ To recall past events, grep {workspace_path}/memory/HISTORY.md"""
|
|||||||
"""
|
"""
|
||||||
msg: dict[str, Any] = {"role": "assistant"}
|
msg: dict[str, Any] = {"role": "assistant"}
|
||||||
|
|
||||||
# Omit empty content — some backends reject empty text blocks
|
# Always include content — some providers (e.g. StepFun) reject
|
||||||
if content:
|
# assistant messages that omit the key entirely.
|
||||||
msg["content"] = content
|
msg["content"] = content
|
||||||
|
|
||||||
if tool_calls:
|
if tool_calls:
|
||||||
msg["tool_calls"] = tool_calls
|
msg["tool_calls"] = tool_calls
|
||||||
|
|||||||
@@ -12,6 +12,12 @@ from nanobot.providers.base import LLMProvider, LLMResponse, ToolCallRequest
|
|||||||
from nanobot.providers.registry import find_by_model, find_gateway
|
from nanobot.providers.registry import find_by_model, find_gateway
|
||||||
|
|
||||||
|
|
||||||
|
# Keys that are part of the OpenAI chat-completion message schema.
|
||||||
|
# Anything else (e.g. reasoning_content, timestamp) is stripped before sending
|
||||||
|
# to avoid "Unrecognized chat message" errors from strict providers like StepFun.
|
||||||
|
_ALLOWED_MSG_KEYS = frozenset({"role", "content", "tool_calls", "tool_call_id", "name"})
|
||||||
|
|
||||||
|
|
||||||
class LiteLLMProvider(LLMProvider):
|
class LiteLLMProvider(LLMProvider):
|
||||||
"""
|
"""
|
||||||
LLM provider using LiteLLM for multi-provider support.
|
LLM provider using LiteLLM for multi-provider support.
|
||||||
@@ -103,6 +109,24 @@ class LiteLLMProvider(LLMProvider):
|
|||||||
kwargs.update(overrides)
|
kwargs.update(overrides)
|
||||||
return
|
return
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _sanitize_messages(messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
|
||||||
|
"""Strip non-standard keys from messages for strict providers.
|
||||||
|
|
||||||
|
Some providers (e.g. StepFun via OpenRouter) reject messages that
|
||||||
|
contain extra keys like ``reasoning_content``. This method keeps
|
||||||
|
only the keys defined in the OpenAI chat-completion schema and
|
||||||
|
ensures every assistant message has a ``content`` key.
|
||||||
|
"""
|
||||||
|
sanitized = []
|
||||||
|
for msg in messages:
|
||||||
|
clean = {k: v for k, v in msg.items() if k in _ALLOWED_MSG_KEYS}
|
||||||
|
# Strict providers require "content" even when assistant only has tool_calls
|
||||||
|
if clean.get("role") == "assistant" and "content" not in clean:
|
||||||
|
clean["content"] = None
|
||||||
|
sanitized.append(clean)
|
||||||
|
return sanitized
|
||||||
|
|
||||||
async def chat(
|
async def chat(
|
||||||
self,
|
self,
|
||||||
messages: list[dict[str, Any]],
|
messages: list[dict[str, Any]],
|
||||||
@@ -132,7 +156,7 @@ class LiteLLMProvider(LLMProvider):
|
|||||||
|
|
||||||
kwargs: dict[str, Any] = {
|
kwargs: dict[str, Any] = {
|
||||||
"model": model,
|
"model": model,
|
||||||
"messages": messages,
|
"messages": self._sanitize_messages(messages),
|
||||||
"max_tokens": max_tokens,
|
"max_tokens": max_tokens,
|
||||||
"temperature": temperature,
|
"temperature": temperature,
|
||||||
}
|
}
|
||||||
|
|||||||
Reference in New Issue
Block a user