refactor(tests): optimize unit test structure

Author: chengyongru
Date: 2026-03-24 13:37:06 +08:00
Committed by: Xubin Ren
Parent: d25985be0b
Commit: 72acba5d27
52 changed files with 104 additions and 13 deletions

View File

@@ -0,0 +1,399 @@
"""Test Azure OpenAI provider implementation (updated for model-based deployment names)."""
from unittest.mock import AsyncMock, Mock, patch
import pytest
from nanobot.providers.azure_openai_provider import AzureOpenAIProvider
from nanobot.providers.base import LLMResponse
def test_azure_openai_provider_init():
"""Test AzureOpenAIProvider initialization without deployment_name."""
provider = AzureOpenAIProvider(
api_key="test-key",
api_base="https://test-resource.openai.azure.com",
default_model="gpt-4o-deployment",
)
assert provider.api_key == "test-key"
assert provider.api_base == "https://test-resource.openai.azure.com/"
assert provider.default_model == "gpt-4o-deployment"
assert provider.api_version == "2024-10-21"
def test_azure_openai_provider_init_validation():
"""Test AzureOpenAIProvider initialization validation."""
# Missing api_key
with pytest.raises(ValueError, match="Azure OpenAI api_key is required"):
AzureOpenAIProvider(api_key="", api_base="https://test.com")
# Missing api_base
with pytest.raises(ValueError, match="Azure OpenAI api_base is required"):
AzureOpenAIProvider(api_key="test", api_base="")
def test_build_chat_url():
"""Test Azure OpenAI URL building with different deployment names."""
provider = AzureOpenAIProvider(
api_key="test-key",
api_base="https://test-resource.openai.azure.com",
default_model="gpt-4o",
)
# Test various deployment names
test_cases = [
("gpt-4o-deployment", "https://test-resource.openai.azure.com/openai/deployments/gpt-4o-deployment/chat/completions?api-version=2024-10-21"),
("gpt-35-turbo", "https://test-resource.openai.azure.com/openai/deployments/gpt-35-turbo/chat/completions?api-version=2024-10-21"),
("custom-model", "https://test-resource.openai.azure.com/openai/deployments/custom-model/chat/completions?api-version=2024-10-21"),
]
for deployment_name, expected_url in test_cases:
url = provider._build_chat_url(deployment_name)
assert url == expected_url
def test_build_chat_url_api_base_without_slash():
"""Test URL building when api_base doesn't end with slash."""
provider = AzureOpenAIProvider(
api_key="test-key",
api_base="https://test-resource.openai.azure.com", # No trailing slash
default_model="gpt-4o",
)
url = provider._build_chat_url("test-deployment")
expected = "https://test-resource.openai.azure.com/openai/deployments/test-deployment/chat/completions?api-version=2024-10-21"
assert url == expected
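
# --- Illustrative sketch (not part of the provider under test) --------------
# The two URL tests above pin down the exact URL shape. A minimal helper that
# would satisfy them is shown below; the real _build_chat_url implementation
# is not included in this diff, so treat this as an assumption.
def _example_build_chat_url(api_base: str, deployment: str, api_version: str = "2024-10-21") -> str:
    base = api_base.rstrip("/")  # tolerate api_base with or without a trailing slash
    return f"{base}/openai/deployments/{deployment}/chat/completions?api-version={api_version}"
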
def test_build_headers():
    """Test Azure OpenAI header building with api-key authentication."""
    provider = AzureOpenAIProvider(
        api_key="test-api-key-123",
        api_base="https://test-resource.openai.azure.com",
        default_model="gpt-4o",
    )
    headers = provider._build_headers()
    assert headers["Content-Type"] == "application/json"
    assert headers["api-key"] == "test-api-key-123"  # Azure OpenAI specific header
    assert "x-session-affinity" in headers


def test_prepare_request_payload():
    """Test request payload preparation with Azure OpenAI 2024-10-21 compliance."""
    provider = AzureOpenAIProvider(
        api_key="test-key",
        api_base="https://test-resource.openai.azure.com",
        default_model="gpt-4o",
    )
    messages = [{"role": "user", "content": "Hello"}]
    payload = provider._prepare_request_payload("gpt-4o", messages, max_tokens=1500, temperature=0.8)
    assert payload["messages"] == messages
    assert payload["max_completion_tokens"] == 1500  # Azure API 2024-10-21 uses max_completion_tokens
    assert payload["temperature"] == 0.8
    assert "tools" not in payload
    # Test with tools
    tools = [{"type": "function", "function": {"name": "get_weather", "parameters": {}}}]
    payload_with_tools = provider._prepare_request_payload("gpt-4o", messages, tools=tools)
    assert payload_with_tools["tools"] == tools
    assert payload_with_tools["tool_choice"] == "auto"
    # Test with reasoning_effort
    payload_with_reasoning = provider._prepare_request_payload(
        "gpt-5-chat", messages, reasoning_effort="medium"
    )
    assert payload_with_reasoning["reasoning_effort"] == "medium"
    assert "temperature" not in payload_with_reasoning
def test_prepare_request_payload_sanitizes_messages():
    """Test that the Azure payload strips non-standard message keys before sending."""
    provider = AzureOpenAIProvider(
        api_key="test-key",
        api_base="https://test-resource.openai.azure.com",
        default_model="gpt-4o",
    )
    messages = [
        {
            "role": "assistant",
            "tool_calls": [{"id": "call_123", "type": "function", "function": {"name": "x"}}],
            "reasoning_content": "hidden chain-of-thought",
        },
        {
            "role": "tool",
            "tool_call_id": "call_123",
            "name": "x",
            "content": "ok",
            "extra_field": "should be removed",
        },
    ]
    payload = provider._prepare_request_payload("gpt-4o", messages)
    assert payload["messages"] == [
        {
            "role": "assistant",
            "content": None,
            "tool_calls": [{"id": "call_123", "type": "function", "function": {"name": "x"}}],
        },
        {
            "role": "tool",
            "tool_call_id": "call_123",
            "name": "x",
            "content": "ok",
        },
    ]
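
# --- Illustrative sketch of the sanitization pinned down above --------------
# Assumed behavior: keep only OpenAI-standard message keys and guarantee a
# content key on assistant tool-call messages. The whitelist below is
# inferred from the expected payload, not taken from the provider source.
_EXAMPLE_ALLOWED_KEYS = {"role", "content", "name", "tool_calls", "tool_call_id"}


def _example_sanitize(messages: list[dict]) -> list[dict]:
    cleaned = []
    for msg in messages:
        kept = {k: v for k, v in msg.items() if k in _EXAMPLE_ALLOWED_KEYS}
        kept.setdefault("content", None)  # Azure requires content (even null) alongside tool_calls
        cleaned.append(kept)
    return cleaned
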
@pytest.mark.asyncio
async def test_chat_success():
    """Test successful chat request using model as deployment name."""
    provider = AzureOpenAIProvider(
        api_key="test-key",
        api_base="https://test-resource.openai.azure.com",
        default_model="gpt-4o-deployment",
    )
    # Mock response data
    mock_response_data = {
        "choices": [{
            "message": {
                "content": "Hello! How can I help you today?",
                "role": "assistant"
            },
            "finish_reason": "stop"
        }],
        "usage": {
            "prompt_tokens": 12,
            "completion_tokens": 18,
            "total_tokens": 30
        }
    }
    with patch("httpx.AsyncClient") as mock_client:
        mock_response = AsyncMock()
        mock_response.status_code = 200
        mock_response.json = Mock(return_value=mock_response_data)
        mock_context = AsyncMock()
        mock_context.post = AsyncMock(return_value=mock_response)
        mock_client.return_value.__aenter__.return_value = mock_context
        # Test with a specific model (deployment name)
        messages = [{"role": "user", "content": "Hello"}]
        result = await provider.chat(messages, model="custom-deployment")
        assert isinstance(result, LLMResponse)
        assert result.content == "Hello! How can I help you today?"
        assert result.finish_reason == "stop"
        assert result.usage["prompt_tokens"] == 12
        assert result.usage["completion_tokens"] == 18
        assert result.usage["total_tokens"] == 30
        # Verify the URL was built with the provided model as deployment name
        call_args = mock_context.post.call_args
        expected_url = "https://test-resource.openai.azure.com/openai/deployments/custom-deployment/chat/completions?api-version=2024-10-21"
        assert call_args[0][0] == expected_url


@pytest.mark.asyncio
async def test_chat_uses_default_model_when_no_model_provided():
    """Test that chat uses default_model when no model is specified."""
    provider = AzureOpenAIProvider(
        api_key="test-key",
        api_base="https://test-resource.openai.azure.com",
        default_model="default-deployment",
    )
    mock_response_data = {
        "choices": [{
            "message": {"content": "Response", "role": "assistant"},
            "finish_reason": "stop"
        }],
        "usage": {"prompt_tokens": 5, "completion_tokens": 5, "total_tokens": 10}
    }
    with patch("httpx.AsyncClient") as mock_client:
        mock_response = AsyncMock()
        mock_response.status_code = 200
        mock_response.json = Mock(return_value=mock_response_data)
        mock_context = AsyncMock()
        mock_context.post = AsyncMock(return_value=mock_response)
        mock_client.return_value.__aenter__.return_value = mock_context
        messages = [{"role": "user", "content": "Test"}]
        await provider.chat(messages)  # No model specified
        # Verify the URL was built with the default model as deployment name
        call_args = mock_context.post.call_args
        expected_url = "https://test-resource.openai.azure.com/openai/deployments/default-deployment/chat/completions?api-version=2024-10-21"
        assert call_args[0][0] == expected_url


@pytest.mark.asyncio
async def test_chat_with_tool_calls():
    """Test chat request with tool calls in the response."""
    provider = AzureOpenAIProvider(
        api_key="test-key",
        api_base="https://test-resource.openai.azure.com",
        default_model="gpt-4o",
    )
    # Mock response with tool calls
    mock_response_data = {
        "choices": [{
            "message": {
                "content": None,
                "role": "assistant",
                "tool_calls": [{
                    "id": "call_12345",
                    "function": {
                        "name": "get_weather",
                        "arguments": '{"location": "San Francisco"}'
                    }
                }]
            },
            "finish_reason": "tool_calls"
        }],
        "usage": {
            "prompt_tokens": 20,
            "completion_tokens": 15,
            "total_tokens": 35
        }
    }
    with patch("httpx.AsyncClient") as mock_client:
        mock_response = AsyncMock()
        mock_response.status_code = 200
        mock_response.json = Mock(return_value=mock_response_data)
        mock_context = AsyncMock()
        mock_context.post = AsyncMock(return_value=mock_response)
        mock_client.return_value.__aenter__.return_value = mock_context
        messages = [{"role": "user", "content": "What's the weather?"}]
        tools = [{"type": "function", "function": {"name": "get_weather", "parameters": {}}}]
        result = await provider.chat(messages, tools=tools, model="weather-model")
        assert isinstance(result, LLMResponse)
        assert result.content is None
        assert result.finish_reason == "tool_calls"
        assert len(result.tool_calls) == 1
        assert result.tool_calls[0].name == "get_weather"
        assert result.tool_calls[0].arguments == {"location": "San Francisco"}


@pytest.mark.asyncio
async def test_chat_api_error():
    """Test chat request API error handling."""
    provider = AzureOpenAIProvider(
        api_key="test-key",
        api_base="https://test-resource.openai.azure.com",
        default_model="gpt-4o",
    )
    with patch("httpx.AsyncClient") as mock_client:
        mock_response = AsyncMock()
        mock_response.status_code = 401
        mock_response.text = "Invalid authentication credentials"
        mock_context = AsyncMock()
        mock_context.post = AsyncMock(return_value=mock_response)
        mock_client.return_value.__aenter__.return_value = mock_context
        messages = [{"role": "user", "content": "Hello"}]
        result = await provider.chat(messages)
        assert isinstance(result, LLMResponse)
        assert "Azure OpenAI API Error 401" in result.content
        assert "Invalid authentication credentials" in result.content
        assert result.finish_reason == "error"


@pytest.mark.asyncio
async def test_chat_connection_error():
    """Test chat request connection error handling."""
    provider = AzureOpenAIProvider(
        api_key="test-key",
        api_base="https://test-resource.openai.azure.com",
        default_model="gpt-4o",
    )
    with patch("httpx.AsyncClient") as mock_client:
        mock_context = AsyncMock()
        mock_context.post = AsyncMock(side_effect=Exception("Connection failed"))
        mock_client.return_value.__aenter__.return_value = mock_context
        messages = [{"role": "user", "content": "Hello"}]
        result = await provider.chat(messages)
        assert isinstance(result, LLMResponse)
        assert "Error calling Azure OpenAI: Exception('Connection failed')" in result.content
        assert result.finish_reason == "error"


def test_parse_response_malformed():
    """Test response parsing with malformed data."""
    provider = AzureOpenAIProvider(
        api_key="test-key",
        api_base="https://test-resource.openai.azure.com",
        default_model="gpt-4o",
    )
    # Test with missing choices
    malformed_response = {"usage": {"prompt_tokens": 10}}
    result = provider._parse_response(malformed_response)
    assert isinstance(result, LLMResponse)
    assert "Error parsing Azure OpenAI response" in result.content
    assert result.finish_reason == "error"


def test_get_default_model():
    """Test the get_default_model method."""
    provider = AzureOpenAIProvider(
        api_key="test-key",
        api_base="https://test-resource.openai.azure.com",
        default_model="my-custom-deployment",
    )
    assert provider.get_default_model() == "my-custom-deployment"


if __name__ == "__main__":
    # Run basic smoke checks without pytest
    print("Running basic Azure OpenAI provider tests...")
    # Test initialization
    provider = AzureOpenAIProvider(
        api_key="test-key",
        api_base="https://test-resource.openai.azure.com",
        default_model="gpt-4o-deployment",
    )
    print("✅ Provider initialization successful")
    # Test URL building
    url = provider._build_chat_url("my-deployment")
    expected = "https://test-resource.openai.azure.com/openai/deployments/my-deployment/chat/completions?api-version=2024-10-21"
    assert url == expected
    print("✅ URL building works correctly")
    # Test headers
    headers = provider._build_headers()
    assert headers["api-key"] == "test-key"
    assert headers["Content-Type"] == "application/json"
    print("✅ Header building works correctly")
    # Test payload preparation
    messages = [{"role": "user", "content": "Test"}]
    payload = provider._prepare_request_payload("gpt-4o-deployment", messages, max_tokens=1000)
    assert payload["max_completion_tokens"] == 1000  # Azure 2024-10-21 format
    print("✅ Payload preparation works correctly")
    print("✅ All basic tests passed! Updated test file is working correctly.")

View File

@@ -0,0 +1,13 @@
from types import SimpleNamespace

from nanobot.providers.custom_provider import CustomProvider


def test_custom_provider_parse_handles_empty_choices() -> None:
    provider = CustomProvider()
    response = SimpleNamespace(choices=[])
    result = provider._parse(response)
    assert result.finish_reason == "error"
    assert "empty choices" in result.content

View File

@@ -0,0 +1,161 @@
"""Regression tests for PR #2026 — litellm_kwargs injection from ProviderSpec.
Validates that:
- OpenRouter uses litellm_prefix (NOT custom_llm_provider) to avoid LiteLLM double-prefixing.
- The litellm_kwargs mechanism works correctly for providers that declare it.
- Non-gateway providers are unaffected.
"""
from __future__ import annotations
from types import SimpleNamespace
from typing import Any
from unittest.mock import AsyncMock, patch
import pytest
from nanobot.providers.litellm_provider import LiteLLMProvider
from nanobot.providers.registry import find_by_name
def _fake_response(content: str = "ok") -> SimpleNamespace:
"""Build a minimal acompletion-shaped response object."""
message = SimpleNamespace(
content=content,
tool_calls=None,
reasoning_content=None,
thinking_blocks=None,
)
choice = SimpleNamespace(message=message, finish_reason="stop")
usage = SimpleNamespace(prompt_tokens=10, completion_tokens=5, total_tokens=15)
return SimpleNamespace(choices=[choice], usage=usage)
def test_openrouter_spec_uses_prefix_not_custom_llm_provider() -> None:
"""OpenRouter must rely on litellm_prefix, not custom_llm_provider kwarg.
LiteLLM internally adds a provider/ prefix when custom_llm_provider is set,
which double-prefixes models (openrouter/anthropic/model) and breaks the API.
"""
spec = find_by_name("openrouter")
assert spec is not None
assert spec.litellm_prefix == "openrouter"
assert "custom_llm_provider" not in spec.litellm_kwargs, (
"custom_llm_provider causes LiteLLM to double-prefix the model name"
)
@pytest.mark.asyncio
async def test_openrouter_prefixes_model_correctly() -> None:
"""OpenRouter should prefix model as openrouter/vendor/model for LiteLLM routing."""
mock_acompletion = AsyncMock(return_value=_fake_response())
with patch("nanobot.providers.litellm_provider.acompletion", mock_acompletion):
provider = LiteLLMProvider(
api_key="sk-or-test-key",
api_base="https://openrouter.ai/api/v1",
default_model="anthropic/claude-sonnet-4-5",
provider_name="openrouter",
)
await provider.chat(
messages=[{"role": "user", "content": "hello"}],
model="anthropic/claude-sonnet-4-5",
)
call_kwargs = mock_acompletion.call_args.kwargs
assert call_kwargs["model"] == "openrouter/anthropic/claude-sonnet-4-5", (
"LiteLLM needs openrouter/ prefix to detect the provider and strip it before API call"
)
assert "custom_llm_provider" not in call_kwargs
@pytest.mark.asyncio
async def test_non_gateway_provider_no_extra_kwargs() -> None:
"""Standard (non-gateway) providers must NOT inject any litellm_kwargs."""
mock_acompletion = AsyncMock(return_value=_fake_response())
with patch("nanobot.providers.litellm_provider.acompletion", mock_acompletion):
provider = LiteLLMProvider(
api_key="sk-ant-test-key",
default_model="claude-sonnet-4-5",
)
await provider.chat(
messages=[{"role": "user", "content": "hello"}],
model="claude-sonnet-4-5",
)
call_kwargs = mock_acompletion.call_args.kwargs
assert "custom_llm_provider" not in call_kwargs, (
"Standard Anthropic provider should NOT inject custom_llm_provider"
)
@pytest.mark.asyncio
async def test_gateway_without_litellm_kwargs_injects_nothing_extra() -> None:
"""Gateways without litellm_kwargs (e.g. AiHubMix) must not add extra keys."""
mock_acompletion = AsyncMock(return_value=_fake_response())
with patch("nanobot.providers.litellm_provider.acompletion", mock_acompletion):
provider = LiteLLMProvider(
api_key="sk-aihub-test-key",
api_base="https://aihubmix.com/v1",
default_model="claude-sonnet-4-5",
provider_name="aihubmix",
)
await provider.chat(
messages=[{"role": "user", "content": "hello"}],
model="claude-sonnet-4-5",
)
call_kwargs = mock_acompletion.call_args.kwargs
assert "custom_llm_provider" not in call_kwargs
@pytest.mark.asyncio
async def test_openrouter_autodetect_by_key_prefix() -> None:
"""OpenRouter should be auto-detected by sk-or- key prefix even without explicit provider_name."""
mock_acompletion = AsyncMock(return_value=_fake_response())
with patch("nanobot.providers.litellm_provider.acompletion", mock_acompletion):
provider = LiteLLMProvider(
api_key="sk-or-auto-detect-key",
default_model="anthropic/claude-sonnet-4-5",
)
await provider.chat(
messages=[{"role": "user", "content": "hello"}],
model="anthropic/claude-sonnet-4-5",
)
call_kwargs = mock_acompletion.call_args.kwargs
assert call_kwargs["model"] == "openrouter/anthropic/claude-sonnet-4-5", (
"Auto-detected OpenRouter should prefix model for LiteLLM routing"
)
@pytest.mark.asyncio
async def test_openrouter_native_model_id_gets_double_prefixed() -> None:
"""Models like openrouter/free must be double-prefixed so LiteLLM strips one layer.
openrouter/free is an actual OpenRouter model ID. LiteLLM strips the first
openrouter/ for routing, so we must send openrouter/openrouter/free to ensure
the API receives openrouter/free.
"""
mock_acompletion = AsyncMock(return_value=_fake_response())
with patch("nanobot.providers.litellm_provider.acompletion", mock_acompletion):
provider = LiteLLMProvider(
api_key="sk-or-test-key",
api_base="https://openrouter.ai/api/v1",
default_model="openrouter/free",
provider_name="openrouter",
)
await provider.chat(
messages=[{"role": "user", "content": "hello"}],
model="openrouter/free",
)
call_kwargs = mock_acompletion.call_args.kwargs
assert call_kwargs["model"] == "openrouter/openrouter/free", (
"openrouter/free must become openrouter/openrouter/free — "
"LiteLLM strips one layer so the API receives openrouter/free"
)
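
# --- Illustrative sketch of the prefixing rule these tests encode -----------
# Assumption for readers: with a spec-level litellm_prefix, the provider
# prepends "<prefix>/" unless the model already starts with one of the spec's
# skip_prefixes. That is why the native ID "openrouter/free" is intentionally
# double-prefixed: LiteLLM strips exactly one layer before calling the API.
def _example_apply_prefix(model: str, litellm_prefix: str | None, skip_prefixes: tuple[str, ...] = ()) -> str:
    if not litellm_prefix:
        return model  # standard providers: model passes through untouched
    if any(model.startswith(p) for p in skip_prefixes):
        return model  # already routable as-is (e.g. "mistral/...")
    return f"{litellm_prefix}/{model}"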

View File

@@ -0,0 +1,22 @@
"""Tests for the Mistral provider registration."""
from nanobot.config.schema import ProvidersConfig
from nanobot.providers.registry import PROVIDERS
def test_mistral_config_field_exists():
"""ProvidersConfig should have a mistral field."""
config = ProvidersConfig()
assert hasattr(config, "mistral")
def test_mistral_provider_in_registry():
"""Mistral should be registered in the provider registry."""
specs = {s.name: s for s in PROVIDERS}
assert "mistral" in specs
mistral = specs["mistral"]
assert mistral.env_key == "MISTRAL_API_KEY"
assert mistral.litellm_prefix == "mistral"
assert mistral.default_api_base == "https://api.mistral.ai/v1"
assert "mistral/" in mistral.skip_prefixes

View File

@@ -0,0 +1,213 @@
import asyncio

import pytest

from nanobot.providers.base import GenerationSettings, LLMProvider, LLMResponse


class ScriptedProvider(LLMProvider):
    def __init__(self, responses):
        super().__init__()
        self._responses = list(responses)
        self.calls = 0
        self.last_kwargs: dict = {}

    async def chat(self, *args, **kwargs) -> LLMResponse:
        self.calls += 1
        self.last_kwargs = kwargs
        response = self._responses.pop(0)
        if isinstance(response, BaseException):
            raise response
        return response

    def get_default_model(self) -> str:
        return "test-model"


@pytest.mark.asyncio
async def test_chat_with_retry_retries_transient_error_then_succeeds(monkeypatch) -> None:
    provider = ScriptedProvider([
        LLMResponse(content="429 rate limit", finish_reason="error"),
        LLMResponse(content="ok"),
    ])
    delays: list[int] = []

    async def _fake_sleep(delay: int) -> None:
        delays.append(delay)

    monkeypatch.setattr("nanobot.providers.base.asyncio.sleep", _fake_sleep)
    response = await provider.chat_with_retry(messages=[{"role": "user", "content": "hello"}])
    assert response.finish_reason == "stop"
    assert response.content == "ok"
    assert provider.calls == 2
    assert delays == [1]


@pytest.mark.asyncio
async def test_chat_with_retry_does_not_retry_non_transient_error(monkeypatch) -> None:
    provider = ScriptedProvider([
        LLMResponse(content="401 unauthorized", finish_reason="error"),
    ])
    delays: list[int] = []

    async def _fake_sleep(delay: int) -> None:
        delays.append(delay)

    monkeypatch.setattr("nanobot.providers.base.asyncio.sleep", _fake_sleep)
    response = await provider.chat_with_retry(messages=[{"role": "user", "content": "hello"}])
    assert response.content == "401 unauthorized"
    assert provider.calls == 1
    assert delays == []


@pytest.mark.asyncio
async def test_chat_with_retry_returns_final_error_after_retries(monkeypatch) -> None:
    provider = ScriptedProvider([
        LLMResponse(content="429 rate limit a", finish_reason="error"),
        LLMResponse(content="429 rate limit b", finish_reason="error"),
        LLMResponse(content="429 rate limit c", finish_reason="error"),
        LLMResponse(content="503 final server error", finish_reason="error"),
    ])
    delays: list[int] = []

    async def _fake_sleep(delay: int) -> None:
        delays.append(delay)

    monkeypatch.setattr("nanobot.providers.base.asyncio.sleep", _fake_sleep)
    response = await provider.chat_with_retry(messages=[{"role": "user", "content": "hello"}])
    assert response.content == "503 final server error"
    assert provider.calls == 4
    assert delays == [1, 2, 4]


@pytest.mark.asyncio
async def test_chat_with_retry_preserves_cancelled_error() -> None:
    provider = ScriptedProvider([asyncio.CancelledError()])
    with pytest.raises(asyncio.CancelledError):
        await provider.chat_with_retry(messages=[{"role": "user", "content": "hello"}])


@pytest.mark.asyncio
async def test_chat_with_retry_uses_provider_generation_defaults() -> None:
    """When callers omit generation params, provider.generation defaults are used."""
    provider = ScriptedProvider([LLMResponse(content="ok")])
    provider.generation = GenerationSettings(temperature=0.2, max_tokens=321, reasoning_effort="high")
    await provider.chat_with_retry(messages=[{"role": "user", "content": "hello"}])
    assert provider.last_kwargs["temperature"] == 0.2
    assert provider.last_kwargs["max_tokens"] == 321
    assert provider.last_kwargs["reasoning_effort"] == "high"


@pytest.mark.asyncio
async def test_chat_with_retry_explicit_override_beats_defaults() -> None:
    """Explicit kwargs should override provider.generation defaults."""
    provider = ScriptedProvider([LLMResponse(content="ok")])
    provider.generation = GenerationSettings(temperature=0.2, max_tokens=321, reasoning_effort="high")
    await provider.chat_with_retry(
        messages=[{"role": "user", "content": "hello"}],
        temperature=0.9,
        max_tokens=9999,
        reasoning_effort="low",
    )
    assert provider.last_kwargs["temperature"] == 0.9
    assert provider.last_kwargs["max_tokens"] == 9999
    assert provider.last_kwargs["reasoning_effort"] == "low"
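
# --- Illustrative sketch of the retry policy these tests pin down -----------
# Assumed behavior: up to three retries on transient errors (429/5xx markers
# in the error content) with exponential backoff of 1s, 2s, 4s; non-transient
# errors return immediately. Helper names are hypothetical; asyncio is
# imported at the top of this module.
def _example_is_transient(content: str) -> bool:
    return any(marker in content for marker in ("429", "500", "502", "503", "504"))


async def _example_chat_with_retry(chat, messages, max_retries: int = 3) -> LLMResponse:
    delay = 1
    for attempt in range(max_retries + 1):
        response = await chat(messages=messages)
        if response.finish_reason != "error" or not _example_is_transient(response.content):
            return response
        if attempt < max_retries:
            await asyncio.sleep(delay)  # the tests monkeypatch this sleep
            delay *= 2
    return response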
# ---------------------------------------------------------------------------
# Image fallback tests
# ---------------------------------------------------------------------------
_IMAGE_MSG = [
    {"role": "user", "content": [
        {"type": "text", "text": "describe this"},
        {"type": "image_url", "image_url": {"url": "data:image/png;base64,abc"}, "_meta": {"path": "/media/test.png"}},
    ]},
]
_IMAGE_MSG_NO_META = [
    {"role": "user", "content": [
        {"type": "text", "text": "describe this"},
        {"type": "image_url", "image_url": {"url": "data:image/png;base64,abc"}},
    ]},
]


@pytest.mark.asyncio
async def test_non_transient_error_with_images_retries_without_images() -> None:
    """Any non-transient error retries once with images stripped when images are present."""
    provider = ScriptedProvider([
        # Error message reads: "API call parameters are invalid, please check the documentation"
        LLMResponse(content="API调用参数有误,请检查文档", finish_reason="error"),
        LLMResponse(content="ok, no image"),
    ])
    response = await provider.chat_with_retry(messages=_IMAGE_MSG)
    assert response.content == "ok, no image"
    assert provider.calls == 2
    msgs_on_retry = provider.last_kwargs["messages"]
    for msg in msgs_on_retry:
        content = msg.get("content")
        if isinstance(content, list):
            assert all(b.get("type") != "image_url" for b in content)
            assert any("[image: /media/test.png]" in (b.get("text") or "") for b in content)


@pytest.mark.asyncio
async def test_non_transient_error_without_images_no_retry() -> None:
    """Non-transient errors without image content are returned immediately."""
    provider = ScriptedProvider([
        LLMResponse(content="401 unauthorized", finish_reason="error"),
    ])
    response = await provider.chat_with_retry(
        messages=[{"role": "user", "content": "hello"}],
    )
    assert provider.calls == 1
    assert response.finish_reason == "error"


@pytest.mark.asyncio
async def test_image_fallback_returns_error_on_second_failure() -> None:
    """If the image-stripped retry also fails, return that error."""
    provider = ScriptedProvider([
        LLMResponse(content="some model error", finish_reason="error"),
        LLMResponse(content="still failing", finish_reason="error"),
    ])
    response = await provider.chat_with_retry(messages=_IMAGE_MSG)
    assert provider.calls == 2
    assert response.content == "still failing"
    assert response.finish_reason == "error"


@pytest.mark.asyncio
async def test_image_fallback_without_meta_uses_default_placeholder() -> None:
    """When _meta is absent, the fallback placeholder is '[image omitted]'."""
    provider = ScriptedProvider([
        LLMResponse(content="error", finish_reason="error"),
        LLMResponse(content="ok"),
    ])
    response = await provider.chat_with_retry(messages=_IMAGE_MSG_NO_META)
    assert response.content == "ok"
    assert provider.calls == 2
    msgs_on_retry = provider.last_kwargs["messages"]
    for msg in msgs_on_retry:
        content = msg.get("content")
        if isinstance(content, list):
            assert any("[image omitted]" in (b.get("text") or "") for b in content)

View File

@@ -0,0 +1,37 @@
"""Tests for lazy provider exports from nanobot.providers."""
from __future__ import annotations
import importlib
import sys
def test_importing_providers_package_is_lazy(monkeypatch) -> None:
monkeypatch.delitem(sys.modules, "nanobot.providers", raising=False)
monkeypatch.delitem(sys.modules, "nanobot.providers.litellm_provider", raising=False)
monkeypatch.delitem(sys.modules, "nanobot.providers.openai_codex_provider", raising=False)
monkeypatch.delitem(sys.modules, "nanobot.providers.azure_openai_provider", raising=False)
providers = importlib.import_module("nanobot.providers")
assert "nanobot.providers.litellm_provider" not in sys.modules
assert "nanobot.providers.openai_codex_provider" not in sys.modules
assert "nanobot.providers.azure_openai_provider" not in sys.modules
assert providers.__all__ == [
"LLMProvider",
"LLMResponse",
"LiteLLMProvider",
"OpenAICodexProvider",
"AzureOpenAIProvider",
]
def test_explicit_provider_import_still_works(monkeypatch) -> None:
monkeypatch.delitem(sys.modules, "nanobot.providers", raising=False)
monkeypatch.delitem(sys.modules, "nanobot.providers.litellm_provider", raising=False)
namespace: dict[str, object] = {}
exec("from nanobot.providers import LiteLLMProvider", namespace)
assert namespace["LiteLLMProvider"].__name__ == "LiteLLMProvider"
assert "nanobot.providers.litellm_provider" in sys.modules