feat: add Ollama as a local LLM provider

Add native Ollama support so local models (e.g. nemotron-3-nano) can be
used without an API key. This adds a ProviderSpec entry with the
ollama_chat LiteLLM prefix, a ProvidersConfig field, and an is_local
flag that skips API-key validation for local providers.
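
For reference, a hedged sketch of the resulting providers section of
~/.nanobot/config.json, written as a Python dict so it can carry
comments (the api_base key name is an assumption inferred from
default_api_base in the registry diff below):

    config_fragment = {
        "providers": {
            "ollama": {
                # assumed key name, mirroring default_api_base below;
                # omit to fall back to http://localhost:11434
                "api_base": "http://localhost:11434",
                # no "api_key": is_local providers skip key validation
            }
        }
    }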

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Author: Jerome Sonnet (letzdoo)
Date:   2026-03-11 07:43:28 +04:00
Commit: dee4f27dce (parent 82f4607b99)

3 changed files with 21 additions and 3 deletions

@@ -252,7 +252,7 @@ def _make_provider(config: Config):
     from nanobot.providers.litellm_provider import LiteLLMProvider
     from nanobot.providers.registry import find_by_name
     spec = find_by_name(provider_name)
-    if not model.startswith("bedrock/") and not (p and p.api_key) and not (spec and spec.is_oauth):
+    if not model.startswith("bedrock/") and not (p and p.api_key) and not (spec and (spec.is_oauth or spec.is_local)):
         console.print("[red]Error: No API key configured.[/red]")
         console.print("Set one in ~/.nanobot/config.json under providers section")
         raise typer.Exit(1)
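
A minimal, self-contained reduction of the gate above, under simplified
assumptions (Spec stands in for ProviderSpec, and the api_key argument
stands in for p.api_key); it shows that a provider flagged is_local now
passes validation with no key:

    from dataclasses import dataclass

    @dataclass
    class Spec:
        is_oauth: bool = False
        is_local: bool = False

    def missing_api_key(model: str, api_key: str | None, spec: Spec | None) -> bool:
        """True when the CLI should abort for lack of credentials."""
        return (
            not model.startswith("bedrock/")
            and not api_key
            and not (spec and (spec.is_oauth or spec.is_local))
        )

    assert missing_api_key("gpt-4o", None, Spec())                           # cloud, no key
    assert not missing_api_key("nemotron-3-nano", None, Spec(is_local=True))  # local Ollama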

@@ -272,6 +272,7 @@ class ProvidersConfig(Base):
     moonshot: ProviderConfig = Field(default_factory=ProviderConfig)
     minimax: ProviderConfig = Field(default_factory=ProviderConfig)
     aihubmix: ProviderConfig = Field(default_factory=ProviderConfig)  # AiHubMix API gateway
+    ollama: ProviderConfig = Field(default_factory=ProviderConfig)  # Ollama local models
     siliconflow: ProviderConfig = Field(default_factory=ProviderConfig)  # SiliconFlow (硅基流动)
     volcengine: ProviderConfig = Field(default_factory=ProviderConfig)  # VolcEngine (火山引擎)
     openai_codex: ProviderConfig = Field(default_factory=ProviderConfig)  # OpenAI Codex (OAuth)

@@ -375,14 +376,14 @@ class Config(BaseSettings):
         for spec in PROVIDERS:
             p = getattr(self.providers, spec.name, None)
             if p and model_prefix and normalized_prefix == spec.name:
-                if spec.is_oauth or p.api_key:
+                if spec.is_oauth or spec.is_local or p.api_key:
                     return p, spec.name
         # Match by keyword (order follows PROVIDERS registry)
         for spec in PROVIDERS:
             p = getattr(self.providers, spec.name, None)
             if p and any(_kw_matches(kw) for kw in spec.keywords):
-                if spec.is_oauth or p.api_key:
+                if spec.is_oauth or spec.is_local or p.api_key:
                     return p, spec.name
         # Fallback: gateways first, then others (follows registry order)
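
A condensed sketch of the two-pass resolution above, under simplified
assumptions (a plain dataclass instead of the real registry, substring
matching in place of _kw_matches); the added is_local check lets a
keyword match like nemotron succeed with no credentials:

    from dataclasses import dataclass

    @dataclass
    class Spec:
        name: str
        keywords: tuple[str, ...]
        is_oauth: bool = False
        is_local: bool = False

    def resolve(model: str, specs: list[Spec], api_keys: dict[str, str]) -> str | None:
        prefix = model.split("/", 1)[0].lower()
        # Pass 1: exact prefix match ("ollama/..." -> ollama)
        for s in specs:
            if prefix == s.name and (s.is_oauth or s.is_local or api_keys.get(s.name)):
                return s.name
        # Pass 2: keyword match anywhere in the model name
        for s in specs:
            if any(kw in model.lower() for kw in s.keywords):
                if s.is_oauth or s.is_local or api_keys.get(s.name):
                    return s.name
        return None

    specs = [Spec("ollama", ("ollama", "nemotron"), is_local=True)]
    assert resolve("nemotron-3-nano", specs, {}) == "ollama"  # keyword match, no key needed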

@@ -360,6 +360,23 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
         strip_model_prefix=False,
         model_overrides=(),
     ),
+    # === Ollama (local, OpenAI-compatible) ===================================
+    ProviderSpec(
+        name="ollama",
+        keywords=("ollama", "nemotron"),
+        env_key="OLLAMA_API_KEY",
+        display_name="Ollama",
+        litellm_prefix="ollama_chat",  # model → ollama_chat/model
+        skip_prefixes=("ollama/", "ollama_chat/"),
+        env_extras=(),
+        is_gateway=False,
+        is_local=True,
+        detect_by_key_prefix="",
+        detect_by_base_keyword="11434",
+        default_api_base="http://localhost:11434",
+        strip_model_prefix=False,
+        model_overrides=(),
+    ),
     # === Auxiliary (not a primary LLM provider) ============================
     # Groq: mainly used for Whisper voice transcription, also usable for LLM.
     # Needs "groq/" prefix for LiteLLM routing. Placed last — it rarely wins fallback.
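
Finally, a hedged sketch of how litellm_prefix and skip_prefixes would
combine during routing (the route helper is hypothetical; the real
logic lives in LiteLLMProvider): bare model names gain the ollama_chat/
prefix, while names already carrying an Ollama prefix pass through
unchanged. Note that detect_by_base_keyword="11434" matches Ollama's
default port in a configured api_base.

    # Hypothetical routing helper; str.startswith accepts a tuple of prefixes.
    def route(model: str, litellm_prefix: str, skip_prefixes: tuple[str, ...]) -> str:
        if model.startswith(skip_prefixes):
            return model  # already routed; avoid double-prefixing
        return f"{litellm_prefix}/{model}"

    skip = ("ollama/", "ollama_chat/")
    assert route("nemotron-3-nano", "ollama_chat", skip) == "ollama_chat/nemotron-3-nano"
    assert route("ollama_chat/llama3", "ollama_chat", skip) == "ollama_chat/llama3"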