Fix Codex provider routing for GitHub Copilot models

This commit is contained in:
PiEgg
2026-02-19 13:30:02 +08:00
parent 8de36d398f
commit 9789307dd6
5 changed files with 90 additions and 4 deletions

View File

@@ -287,11 +287,34 @@ class Config(BaseSettings):
from nanobot.providers.registry import PROVIDERS
model_lower = (model or self.agents.defaults.model).lower()
model_normalized = model_lower.replace("-", "_")
model_prefix = model_lower.split("/", 1)[0] if "/" in model_lower else ""
normalized_prefix = model_prefix.replace("-", "_")
def _matches_model_prefix(spec_name: str) -> bool:
if not model_prefix:
return False
return normalized_prefix == spec_name
def _keyword_matches(keyword: str) -> bool:
keyword_lower = keyword.lower()
return (
keyword_lower in model_lower
or keyword_lower.replace("-", "_") in model_normalized
)
# Explicit provider prefix in model name wins over generic keyword matches.
# This prevents `github-copilot/...codex` from being treated as OpenAI Codex.
for spec in PROVIDERS:
p = getattr(self.providers, spec.name, None)
if p and _matches_model_prefix(spec.name):
if spec.is_oauth or p.api_key:
return p, spec.name
# Match by keyword (order follows PROVIDERS registry)
for spec in PROVIDERS:
p = getattr(self.providers, spec.name, None)
if p and any(kw in model_lower for kw in spec.keywords):
if p and any(_keyword_matches(kw) for kw in spec.keywords):
if spec.is_oauth or p.api_key:
return p, spec.name

View File

@@ -88,10 +88,21 @@ class LiteLLMProvider(LLMProvider):
# Standard mode: auto-prefix for known providers
spec = find_by_model(model)
if spec and spec.litellm_prefix:
model = self._canonicalize_explicit_prefix(model, spec.name, spec.litellm_prefix)
if not any(model.startswith(s) for s in spec.skip_prefixes):
model = f"{spec.litellm_prefix}/{model}"
return model
@staticmethod
def _canonicalize_explicit_prefix(model: str, spec_name: str, canonical_prefix: str) -> str:
"""Normalize explicit provider prefixes like `github-copilot/...`."""
if "/" not in model:
return model
prefix, remainder = model.split("/", 1)
if prefix.lower().replace("-", "_") != spec_name:
return model
return f"{canonical_prefix}/{remainder}"
def _apply_model_overrides(self, model: str, kwargs: dict[str, Any]) -> None:
"""Apply model-specific parameter overrides from the registry."""

View File

@@ -80,7 +80,7 @@ class OpenAICodexProvider(LLMProvider):
def _strip_model_prefix(model: str) -> str:
if model.startswith("openai-codex/"):
if model.startswith("openai-codex/") or model.startswith("openai_codex/"):
return model.split("/", 1)[1]
return model

View File

def find_by_model(model: str) -> ProviderSpec | None:
    """Match a standard provider by model-name keyword (case-insensitive).

    Skips gateways/local — those are matched by api_key/api_base instead.
    An explicit provider prefix in the model name (e.g. `github-copilot/...`)
    takes priority over generic keyword matches, so a Copilot-routed Codex
    model is not misattributed to OpenAI Codex. Matching is hyphen/underscore
    insensitive on both the prefix and the keywords.

    Returns the first matching spec in PROVIDERS registry order, or None.
    """
    model_lower = model.lower()
    model_normalized = model_lower.replace("-", "_")
    # Provider prefix is everything before the first "/" (empty if no "/").
    model_prefix = model_lower.split("/", 1)[0] if "/" in model_lower else ""
    normalized_prefix = model_prefix.replace("-", "_")
    # Pass 1: prefer an explicit provider prefix in the model name.
    for spec in PROVIDERS:
        if spec.is_gateway or spec.is_local:
            continue
        if model_prefix and normalized_prefix == spec.name:
            return spec
    # Pass 2: fall back to keyword matching (order follows PROVIDERS).
    for spec in PROVIDERS:
        if spec.is_gateway or spec.is_local:
            continue
        if any(
            kw in model_lower or kw.replace("-", "_") in model_normalized
            for kw in spec.keywords
        ):
            return spec
    return None