- Add OpenAI Codex ProviderSpec to registry.py
- Add openai_codex config field to ProvidersConfig in schema.py
- Mark Codex as OAuth-based (no API key required)
- Set appropriate default_api_base for the Codex API

This integrates the Codex OAuth provider with the refactored
provider registry system introduced in upstream commit 299d8b3.
"""
|
|
Provider Registry — single source of truth for LLM provider metadata.
|
|
|
|
Adding a new provider:
|
|
1. Add a ProviderSpec to PROVIDERS below.
|
|
2. Add a field to ProvidersConfig in config/schema.py.
|
|
Done. Env vars, prefixing, config matching, status display all derive from here.
|
|
|
|
Order matters — it controls match priority and fallback. Gateways first.
|
|
Every entry writes out all fields so you can copy-paste as a template.
|
|
"""
|
|
|
|
from __future__ import annotations
|
|
|
|
from dataclasses import dataclass
|
|
from typing import Any
|
|
|
|
|
|
@dataclass(frozen=True)
class ProviderSpec:
    """Metadata describing one LLM provider.

    The entries in PROVIDERS below serve as worked examples.

    Values in env_extras may contain placeholders:
      {api_key}  — the user's API key
      {api_base} — api_base from config, falling back to this spec's default_api_base
    """

    # --- identity ---
    name: str                                   # config field name, e.g. "dashscope"
    keywords: tuple[str, ...]                   # lowercase model-name keywords used for matching
    env_key: str                                # LiteLLM env var, e.g. "DASHSCOPE_API_KEY"
    display_name: str = ""                      # label shown by `nanobot status`

    # --- model prefixing ---
    litellm_prefix: str = ""                    # "dashscope" → model becomes "dashscope/{model}"
    skip_prefixes: tuple[str, ...] = ()         # skip prefixing when model already starts with one of these

    # --- additional env vars, e.g. (("ZHIPUAI_API_KEY", "{api_key}"),) ---
    env_extras: tuple[tuple[str, str], ...] = ()

    # --- gateway / local detection ---
    is_gateway: bool = False                    # can route any model (OpenRouter, AiHubMix)
    is_local: bool = False                      # local deployment (vLLM, Ollama)
    detect_by_key_prefix: str = ""              # match on api_key prefix, e.g. "sk-or-"
    detect_by_base_keyword: str = ""            # match on a substring of the api_base URL
    default_api_base: str = ""                  # base URL used when config provides none

    # --- gateway behavior ---
    strip_model_prefix: bool = False            # drop "provider/" before re-prefixing

    # --- per-model param overrides, e.g. (("kimi-k2.5", {"temperature": 1.0}),) ---
    model_overrides: tuple[tuple[str, dict[str, Any]], ...] = ()

    @property
    def label(self) -> str:
        """Human-readable name: display_name when set, else the title-cased name."""
        if self.display_name:
            return self.display_name
        return self.name.title()
# ---------------------------------------------------------------------------
# PROVIDERS — the registry. Order = priority. Copy any entry as template.
# ---------------------------------------------------------------------------

PROVIDERS: tuple[ProviderSpec, ...] = (

    # === Gateways (detected by api_key / api_base, not model name) =========
    # Gateways can route any model, so they win in fallback.

    # OpenRouter: global gateway, keys start with "sk-or-"
    ProviderSpec(
        name="openrouter",
        keywords=("openrouter",),
        env_key="OPENROUTER_API_KEY",
        display_name="OpenRouter",
        litellm_prefix="openrouter",  # claude-3 → openrouter/claude-3
        skip_prefixes=(),
        env_extras=(),
        is_gateway=True,
        is_local=False,
        detect_by_key_prefix="sk-or-",
        detect_by_base_keyword="openrouter",
        default_api_base="https://openrouter.ai/api/v1",
        strip_model_prefix=False,
        model_overrides=(),
    ),

    # AiHubMix: global gateway, OpenAI-compatible interface.
    # strip_model_prefix=True: it doesn't understand "anthropic/claude-3",
    # so we strip to bare "claude-3" then re-prefix as "openai/claude-3".
    ProviderSpec(
        name="aihubmix",
        keywords=("aihubmix",),
        env_key="OPENAI_API_KEY",  # OpenAI-compatible
        display_name="AiHubMix",
        litellm_prefix="openai",  # → openai/{model}
        skip_prefixes=(),
        env_extras=(),
        is_gateway=True,
        is_local=False,
        detect_by_key_prefix="",
        detect_by_base_keyword="aihubmix",
        default_api_base="https://aihubmix.com/v1",
        strip_model_prefix=True,  # anthropic/claude-3 → claude-3 → openai/claude-3
        model_overrides=(),
    ),

    # === Standard providers (matched by model-name keywords) ===============

    # Anthropic: LiteLLM recognizes "claude-*" natively, no prefix needed.
    ProviderSpec(
        name="anthropic",
        keywords=("anthropic", "claude"),
        env_key="ANTHROPIC_API_KEY",
        display_name="Anthropic",
        litellm_prefix="",
        skip_prefixes=(),
        env_extras=(),
        is_gateway=False,
        is_local=False,
        detect_by_key_prefix="",
        detect_by_base_keyword="",
        default_api_base="",
        strip_model_prefix=False,
        model_overrides=(),
    ),

    # OpenAI Codex: uses OAuth, not API key.
    # Must come BEFORE the plain OpenAI entry: keyword matching is
    # substring-based and order = priority, so Codex model names like
    # "gpt-5-codex" (contains "gpt") or "openai-codex" (contains "openai")
    # would otherwise be claimed by the OpenAI spec and require an API key.
    ProviderSpec(
        name="openai_codex",
        keywords=("openai-codex", "codex"),
        env_key="",  # OAuth-based, no API key
        display_name="OpenAI Codex",
        litellm_prefix="",  # Not routed through LiteLLM
        skip_prefixes=(),
        env_extras=(),
        is_gateway=False,
        is_local=False,
        detect_by_key_prefix="",
        detect_by_base_keyword="codex",
        default_api_base="https://chatgpt.com/backend-api",
        strip_model_prefix=False,
        model_overrides=(),
    ),

    # OpenAI: LiteLLM recognizes "gpt-*" natively, no prefix needed.
    ProviderSpec(
        name="openai",
        keywords=("openai", "gpt"),
        env_key="OPENAI_API_KEY",
        display_name="OpenAI",
        litellm_prefix="",
        skip_prefixes=(),
        env_extras=(),
        is_gateway=False,
        is_local=False,
        detect_by_key_prefix="",
        detect_by_base_keyword="",
        default_api_base="",
        strip_model_prefix=False,
        model_overrides=(),
    ),

    # DeepSeek: needs "deepseek/" prefix for LiteLLM routing.
    ProviderSpec(
        name="deepseek",
        keywords=("deepseek",),
        env_key="DEEPSEEK_API_KEY",
        display_name="DeepSeek",
        litellm_prefix="deepseek",  # deepseek-chat → deepseek/deepseek-chat
        skip_prefixes=("deepseek/",),  # avoid double-prefix
        env_extras=(),
        is_gateway=False,
        is_local=False,
        detect_by_key_prefix="",
        detect_by_base_keyword="",
        default_api_base="",
        strip_model_prefix=False,
        model_overrides=(),
    ),

    # Gemini: needs "gemini/" prefix for LiteLLM.
    ProviderSpec(
        name="gemini",
        keywords=("gemini",),
        env_key="GEMINI_API_KEY",
        display_name="Gemini",
        litellm_prefix="gemini",  # gemini-pro → gemini/gemini-pro
        skip_prefixes=("gemini/",),  # avoid double-prefix
        env_extras=(),
        is_gateway=False,
        is_local=False,
        detect_by_key_prefix="",
        detect_by_base_keyword="",
        default_api_base="",
        strip_model_prefix=False,
        model_overrides=(),
    ),

    # Zhipu: LiteLLM uses "zai/" prefix.
    # Also mirrors key to ZHIPUAI_API_KEY (some LiteLLM paths check that).
    # skip_prefixes: don't add "zai/" when already routed via gateway.
    ProviderSpec(
        name="zhipu",
        keywords=("zhipu", "glm", "zai"),
        env_key="ZAI_API_KEY",
        display_name="Zhipu AI",
        litellm_prefix="zai",  # glm-4 → zai/glm-4
        skip_prefixes=("zhipu/", "zai/", "openrouter/", "hosted_vllm/"),
        env_extras=(
            ("ZHIPUAI_API_KEY", "{api_key}"),
        ),
        is_gateway=False,
        is_local=False,
        detect_by_key_prefix="",
        detect_by_base_keyword="",
        default_api_base="",
        strip_model_prefix=False,
        model_overrides=(),
    ),

    # DashScope: Qwen models, needs "dashscope/" prefix.
    ProviderSpec(
        name="dashscope",
        keywords=("qwen", "dashscope"),
        env_key="DASHSCOPE_API_KEY",
        display_name="DashScope",
        litellm_prefix="dashscope",  # qwen-max → dashscope/qwen-max
        skip_prefixes=("dashscope/", "openrouter/"),
        env_extras=(),
        is_gateway=False,
        is_local=False,
        detect_by_key_prefix="",
        detect_by_base_keyword="",
        default_api_base="",
        strip_model_prefix=False,
        model_overrides=(),
    ),

    # Moonshot: Kimi models, needs "moonshot/" prefix.
    # LiteLLM requires MOONSHOT_API_BASE env var to find the endpoint.
    # Kimi K2.5 API enforces temperature >= 1.0.
    ProviderSpec(
        name="moonshot",
        keywords=("moonshot", "kimi"),
        env_key="MOONSHOT_API_KEY",
        display_name="Moonshot",
        litellm_prefix="moonshot",  # kimi-k2.5 → moonshot/kimi-k2.5
        skip_prefixes=("moonshot/", "openrouter/"),
        env_extras=(
            ("MOONSHOT_API_BASE", "{api_base}"),
        ),
        is_gateway=False,
        is_local=False,
        detect_by_key_prefix="",
        detect_by_base_keyword="",
        default_api_base="https://api.moonshot.ai/v1",  # intl; use api.moonshot.cn for China
        strip_model_prefix=False,
        model_overrides=(
            ("kimi-k2.5", {"temperature": 1.0}),
        ),
    ),

    # === Local deployment (fallback: unknown api_base → assume local) ======

    # vLLM / any OpenAI-compatible local server.
    # If api_base is set but doesn't match a known gateway, we land here.
    # Placed before Groq so vLLM wins the fallback when both are configured.
    ProviderSpec(
        name="vllm",
        keywords=("vllm",),
        env_key="HOSTED_VLLM_API_KEY",
        display_name="vLLM/Local",
        litellm_prefix="hosted_vllm",  # Llama-3-8B → hosted_vllm/Llama-3-8B
        skip_prefixes=(),
        env_extras=(),
        is_gateway=False,
        is_local=True,
        detect_by_key_prefix="",
        detect_by_base_keyword="",
        default_api_base="",  # user must provide in config
        strip_model_prefix=False,
        model_overrides=(),
    ),

    # === Auxiliary (not a primary LLM provider) ============================

    # Groq: mainly used for Whisper voice transcription, also usable for LLM.
    # Needs "groq/" prefix for LiteLLM routing. Placed last — it rarely wins fallback.
    ProviderSpec(
        name="groq",
        keywords=("groq",),
        env_key="GROQ_API_KEY",
        display_name="Groq",
        litellm_prefix="groq",  # llama3-8b-8192 → groq/llama3-8b-8192
        skip_prefixes=("groq/",),  # avoid double-prefix
        env_extras=(),
        is_gateway=False,
        is_local=False,
        detect_by_key_prefix="",
        detect_by_base_keyword="",
        default_api_base="",
        strip_model_prefix=False,
        model_overrides=(),
    ),
)
# ---------------------------------------------------------------------------
# Lookup helpers
# ---------------------------------------------------------------------------

def find_by_model(model: str) -> ProviderSpec | None:
    """Match a standard provider by model-name keyword (case-insensitive).

    Gateways and local specs are skipped here — they are detected from
    api_key / api_base instead. Returns None when nothing matches."""
    needle = model.lower()
    return next(
        (
            spec
            for spec in PROVIDERS
            if not (spec.is_gateway or spec.is_local)
            and any(kw in needle for kw in spec.keywords)
        ),
        None,
    )
def find_gateway(api_key: str | None, api_base: str | None) -> ProviderSpec | None:
    """Detect a gateway/local provider from api_key prefix or api_base substring.

    Fallback: an api_base that matches no known provider is treated as a
    local deployment (vLLM). Returns None when neither key nor base is given."""
    for spec in PROVIDERS:
        prefix = spec.detect_by_key_prefix
        if prefix and api_key and api_key.startswith(prefix):
            return spec
        keyword = spec.detect_by_base_keyword
        if keyword and api_base and keyword in api_base:
            return spec
    # Nothing matched explicitly: any configured base URL is assumed local.
    if not api_base:
        return None
    return next((s for s in PROVIDERS if s.is_local), None)
def find_by_name(name: str) -> ProviderSpec | None:
    """Look up a provider spec by its config field name, e.g. "dashscope"."""
    return next((spec for spec in PROVIDERS if spec.name == name), None)