fix(providers): normalize empty reasoning_content to None at provider level

PR #947 fixed the consumer side (context.py) but the root cause is at
the provider level — getattr returns "" (empty string) instead of None
when reasoning_content is empty. This causes the DeepSeek API to reject the
request with a "Missing reasoning_content field" error.

`"" or None` evaluates to None, preventing empty strings from
propagating downstream.

Fixes #946
This commit is contained in:
andienguyen-ecoligo
2026-02-21 12:30:57 -05:00
parent 0040c62b74
commit de63c31d43
2 changed files with 2 additions and 2 deletions

View File

@@ -40,7 +40,7 @@ class CustomProvider(LLMProvider):
return LLMResponse( return LLMResponse(
content=msg.content, tool_calls=tool_calls, finish_reason=choice.finish_reason or "stop", content=msg.content, tool_calls=tool_calls, finish_reason=choice.finish_reason or "stop",
usage={"prompt_tokens": u.prompt_tokens, "completion_tokens": u.completion_tokens, "total_tokens": u.total_tokens} if u else {}, usage={"prompt_tokens": u.prompt_tokens, "completion_tokens": u.completion_tokens, "total_tokens": u.total_tokens} if u else {},
reasoning_content=getattr(msg, "reasoning_content", None), reasoning_content=getattr(msg, "reasoning_content", None) or None,
) )
def get_default_model(self) -> str: def get_default_model(self) -> str:

View File

@@ -257,7 +257,7 @@ class LiteLLMProvider(LLMProvider):
"total_tokens": response.usage.total_tokens, "total_tokens": response.usage.total_tokens,
} }
reasoning_content = getattr(message, "reasoning_content", None) reasoning_content = getattr(message, "reasoning_content", None) or None
return LLMResponse( return LLMResponse(
content=message.content, content=message.content,