feat: add Ollama as a local LLM provider
Add native Ollama support so local models (e.g. nemotron-3-nano) can be used
without an API key. Adds a ProviderSpec entry with the ollama_chat LiteLLM
prefix and a ProvidersConfig field, and skips API key validation for local
providers.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
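The routing comment in the Ollama spec below ("model → ollama_chat/model") implies prefixing behavior along these lines. This is a sketch only: the real logic lives in LiteLLMProvider, which this diff does not show, and the helper name route is hypothetical.

    def route(model: str, litellm_prefix: str, skip_prefixes: tuple[str, ...]) -> str:
        # Leave already-routed names untouched; otherwise prepend the LiteLLM prefix.
        if model.startswith(skip_prefixes):  # str.startswith accepts a tuple
            return model
        return f"{litellm_prefix}/{model}"  # nemotron-3-nano -> ollama_chat/nemotron-3-nano

    assert route("nemotron-3-nano", "ollama_chat", ("ollama/", "ollama_chat/")) == "ollama_chat/nemotron-3-nano"
    assert route("ollama/nemotron-3-nano", "ollama_chat", ("ollama/", "ollama_chat/")) == "ollama/nemotron-3-nano"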
@@ -252,7 +252,7 @@ def _make_provider(config: Config):
     from nanobot.providers.litellm_provider import LiteLLMProvider
     from nanobot.providers.registry import find_by_name
     spec = find_by_name(provider_name)
-    if not model.startswith("bedrock/") and not (p and p.api_key) and not (spec and spec.is_oauth):
+    if not model.startswith("bedrock/") and not (p and p.api_key) and not (spec and (spec.is_oauth or spec.is_local)):
         console.print("[red]Error: No API key configured.[/red]")
         console.print("Set one in ~/.nanobot/config.json under providers section")
         raise typer.Exit(1)
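The guard above, extracted as a standalone predicate for clarity (a minimal sketch; the Spec dataclass stands in for the real ProviderSpec, and needs_api_key is a hypothetical name):

    from dataclasses import dataclass

    @dataclass
    class Spec:
        is_oauth: bool = False
        is_local: bool = False

    def needs_api_key(model: str, api_key: str | None, spec: Spec | None) -> bool:
        # Bedrock uses the AWS credential chain, OAuth providers bring their
        # own tokens, and local providers such as Ollama need no key at all.
        if model.startswith("bedrock/") or api_key:
            return False
        return not (spec and (spec.is_oauth or spec.is_local))

    assert not needs_api_key("ollama/nemotron-3-nano", None, Spec(is_local=True))
    assert needs_api_key("gpt-4o", None, None)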
@@ -272,6 +272,7 @@ class ProvidersConfig(Base):
     moonshot: ProviderConfig = Field(default_factory=ProviderConfig)
     minimax: ProviderConfig = Field(default_factory=ProviderConfig)
     aihubmix: ProviderConfig = Field(default_factory=ProviderConfig) # AiHubMix API gateway
+    ollama: ProviderConfig = Field(default_factory=ProviderConfig) # Ollama local models
     siliconflow: ProviderConfig = Field(default_factory=ProviderConfig) # SiliconFlow (硅基流动)
     volcengine: ProviderConfig = Field(default_factory=ProviderConfig) # VolcEngine (火山引擎)
     openai_codex: ProviderConfig = Field(default_factory=ProviderConfig) # OpenAI Codex (OAuth)
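For context, a self-contained sketch of how the new field plugs into the config models. Pydantic is assumed from the Field(default_factory=...) pattern, and the ProviderConfig fields shown are assumptions inferred from the p.api_key and default_api_base references elsewhere in the diff:

    from pydantic import BaseModel, Field

    class ProviderConfig(BaseModel):
        api_key: str | None = None   # stays None for local providers (assumed field)
        api_base: str | None = None  # e.g. a non-default Ollama host (assumed field)

    class ProvidersConfig(BaseModel):
        ollama: ProviderConfig = Field(default_factory=ProviderConfig)

    cfg = ProvidersConfig.model_validate({"ollama": {"api_base": "http://localhost:11434"}})
    assert cfg.ollama.api_key is None  # valid: Ollama requires no key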
@@ -375,14 +376,14 @@ class Config(BaseSettings):
         for spec in PROVIDERS:
             p = getattr(self.providers, spec.name, None)
             if p and model_prefix and normalized_prefix == spec.name:
-                if spec.is_oauth or p.api_key:
+                if spec.is_oauth or spec.is_local or p.api_key:
                     return p, spec.name

         # Match by keyword (order follows PROVIDERS registry)
         for spec in PROVIDERS:
             p = getattr(self.providers, spec.name, None)
             if p and any(_kw_matches(kw) for kw in spec.keywords):
-                if spec.is_oauth or p.api_key:
+                if spec.is_oauth or spec.is_local or p.api_key:
                     return p, spec.name

         # Fallback: gateways first, then others (follows registry order)
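A condensed model of the keyword pass above, showing why a bare nemotron model name now resolves to Ollama with no key configured. The substring matching is an assumption about _kw_matches (the diff only shows its call site), and resolve is a hypothetical name:

    def resolve(model: str, specs: dict[str, dict]) -> str | None:
        # The real code iterates the PROVIDERS registry in declaration order.
        for name, spec in specs.items():
            if any(kw in model.lower() for kw in spec["keywords"]):
                if spec.get("is_oauth") or spec.get("is_local") or spec.get("api_key"):
                    return name
        return None

    specs = {"ollama": {"keywords": ("ollama", "nemotron"), "is_local": True}}
    assert resolve("nemotron-3-nano", specs) == "ollama"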
@@ -360,6 +360,23 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
         strip_model_prefix=False,
         model_overrides=(),
     ),
+    # === Ollama (local, OpenAI-compatible) ===================================
+    ProviderSpec(
+        name="ollama",
+        keywords=("ollama", "nemotron"),
+        env_key="OLLAMA_API_KEY",
+        display_name="Ollama",
+        litellm_prefix="ollama_chat", # model → ollama_chat/model
+        skip_prefixes=("ollama/", "ollama_chat/"),
+        env_extras=(),
+        is_gateway=False,
+        is_local=True,
+        detect_by_key_prefix="",
+        detect_by_base_keyword="11434",
+        default_api_base="http://localhost:11434",
+        strip_model_prefix=False,
+        model_overrides=(),
+    ),
     # === Auxiliary (not a primary LLM provider) ============================
     # Groq: mainly used for Whisper voice transcription, also usable for LLM.
     # Needs "groq/" prefix for LiteLLM routing. Placed last — it rarely wins fallback.
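A quick smoke test of the routing this spec enables, assuming a local Ollama daemon on the default port and an already-pulled model (the model name is illustrative):

    import litellm

    resp = litellm.completion(
        model="ollama_chat/nemotron-3-nano",  # litellm_prefix + "/" + model
        messages=[{"role": "user", "content": "Say hi in five words."}],
        api_base="http://localhost:11434",    # matches default_api_base above
    )
    print(resp.choices[0].message.content)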