fix: prevent session poisoning from null/error LLM responses
When an LLM returns `content: null` on a plain assistant message (no tool_calls), the null gets saved to session history and causes permanent 400 errors on every subsequent request.

- Sanitize `None` content on plain assistant messages to "(empty)" in `_sanitize_empty_content()`, matching the existing empty-string handling.
- Skip persisting error responses (`finish_reason="error"`) to the message history in `_run_agent_loop()`, preventing poison loops.

Closes #1303
This commit is contained in:
@@ -224,6 +224,12 @@ class AgentLoop:
|
||||
)
|
||||
else:
|
||||
clean = self._strip_think(response.content)
|
||||
# Don't persist error responses to session history — they can
|
||||
# poison the context and cause permanent 400 loops (#1303).
|
||||
if response.finish_reason == "error":
|
||||
logger.error("LLM returned error: {}", (clean or "")[:200])
|
||||
final_content = clean or "Sorry, I encountered an error calling the AI model."
|
||||
break
|
||||
messages = self.context.add_assistant_message(
|
||||
messages, clean, reasoning_content=response.reasoning_content,
|
||||
)
|
||||
|
||||
@@ -51,6 +51,14 @@ class LLMProvider(ABC):
|
||||
for msg in messages:
|
||||
content = msg.get("content")
|
||||
|
||||
# None content on a plain assistant message (no tool_calls) crashes
|
||||
# providers with "invalid message content type: <nil>".
|
||||
if content is None and msg.get("role") == "assistant" and not msg.get("tool_calls"):
|
||||
clean = dict(msg)
|
||||
clean["content"] = "(empty)"
|
||||
result.append(clean)
|
||||
continue
|
||||
|
||||
if isinstance(content, str) and not content:
|
||||
clean = dict(msg)
|
||||
clean["content"] = None if (msg.get("role") == "assistant" and msg.get("tool_calls")) else "(empty)"
|
||||
|
||||
Reference in New Issue
Block a user