fix(custom_provider): truncate raw error body to prevent huge HTML pages

Made-with: Cursor
This commit is contained in:
Xubin Ren
2026-03-20 11:09:21 +00:00
committed by Xubin Ren
parent 8b971a7827
commit fc1ea07450

View File

@@ -51,12 +51,12 @@ class CustomProvider(LLMProvider):
         try:
             return self._parse(await self._client.chat.completions.create(**kwargs))
         except Exception as e:
-            # Extract raw response body from non-JSON API errors.
-            # JSONDecodeError.doc contains the original text (e.g. "unsupported model: xxx");
-            # OpenAI APIError may carry it in response.text.
+            # JSONDecodeError.doc / APIError.response.text may carry the raw body
+            # (e.g. "unsupported model: xxx") which is far more useful than the
+            # generic "Expecting value …" message. Truncate to avoid huge HTML pages.
             body = getattr(e, "doc", None) or getattr(getattr(e, "response", None), "text", None)
             if body and body.strip():
-                return LLMResponse(content=f"Error: {body.strip()}", finish_reason="error")
+                return LLMResponse(content=f"Error: {body.strip()[:500]}", finish_reason="error")
             return LLMResponse(content=f"Error: {e}", finish_reason="error")

     def _parse(self, response: Any) -> LLMResponse: