fix(custom_provider): show raw API error instead of JSONDecodeError
When an OpenAI-compatible API returns a non-JSON response (e.g. plain text
"unsupported model: xxx" with HTTP 200), the OpenAI SDK raises a
JSONDecodeError whose message is the unhelpful "Expecting value: line 1
column 1 (char 0)". Extract the original response body from
JSONDecodeError.doc (or APIError.response.text) so users see the actual
error message from the API.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
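A minimal sketch (plain Python, not part of the change) of the stdlib behavior the fix relies on: json.JSONDecodeError keeps the full text it failed to parse in its .doc attribute, which is where the raw body can be recovered from.

import json

try:
    json.loads("unsupported model: xxx")  # plain-text body returned with HTTP 200
except json.JSONDecodeError as e:
    print(e)      # "Expecting value: line 1 column 1 (char 0)" -- the unhelpful message
    print(e.doc)  # "unsupported model: xxx" -- the original response body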
@@ -51,6 +51,12 @@ class CustomProvider(LLMProvider):
         try:
             return self._parse(await self._client.chat.completions.create(**kwargs))
         except Exception as e:
+            # Extract raw response body from non-JSON API errors.
+            # JSONDecodeError.doc contains the original text (e.g. "unsupported model: xxx");
+            # OpenAI APIError may carry it in response.text.
+            body = getattr(e, "doc", None) or getattr(getattr(e, "response", None), "text", None)
+            if body and body.strip():
+                return LLMResponse(content=f"Error: {body.strip()}", finish_reason="error")
             return LLMResponse(content=f"Error: {e}", finish_reason="error")
 
     def _parse(self, response: Any) -> LLMResponse:
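A hypothetical check of the fallback chain in the added code; FakeResponse and FakeAPIError are stand-ins for illustration (mimicking an openai APIStatusError-style exception whose .response carries .text), not real SDK classes.

class FakeResponse:
    text = "unsupported model: xxx"

class FakeAPIError(Exception):
    # Stand-in for an openai API error that carries the HTTP response object.
    response = FakeResponse()

e = FakeAPIError("Expecting value: line 1 column 1 (char 0)")
# No .doc attribute here, so the chain falls through to response.text.
body = getattr(e, "doc", None) or getattr(getattr(e, "response", None), "text", None)
assert body == "unsupported model: xxx"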