feat: add vLLM/local LLM support

- Add vllm provider configuration in config schema
- Auto-detect vLLM endpoints and use hosted_vllm/ prefix for LiteLLM
- Pass api_base directly to acompletion for custom endpoints
- Add vLLM status display in CLI status command
- Add vLLM setup documentation in README

Author: ZhihaoZhang97
Date: 2026-02-02 11:23:04 +11:00
Parent: 959c4dadf8
Commit: 2b19dcf9fd
4 changed files with 61 additions and 2 deletions
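The second bullet is the core of the change: when a vLLM endpoint is detected, the model is addressed through LiteLLM's hosted_vllm/ provider prefix and the configured api_base is forwarded to acompletion. A minimal sketch of that call path, assuming LiteLLM is installed; the model name and endpoint below are illustrative, not taken from this commit:

    # Sketch only: calling an OpenAI-compatible vLLM server through LiteLLM.
    import asyncio

    import litellm

    async def main() -> None:
        response = await litellm.acompletion(
            # The hosted_vllm/ prefix selects LiteLLM's vLLM-compatible route.
            model="hosted_vllm/Qwen/Qwen2.5-7B-Instruct",
            api_base="http://localhost:8000/v1",  # forwarded directly to the custom endpoint
            api_key="dummy",  # vLLM only enforces a key if started with --api-key
            messages=[{"role": "user", "content": "Hello"}],
        )
        print(response.choices[0].message.content)

    asyncio.run(main())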


@@ -50,6 +50,7 @@ class ProvidersConfig(BaseModel):
     anthropic: ProviderConfig = Field(default_factory=ProviderConfig)
     openai: ProviderConfig = Field(default_factory=ProviderConfig)
     openrouter: ProviderConfig = Field(default_factory=ProviderConfig)
+    vllm: ProviderConfig = Field(default_factory=ProviderConfig)
 
 
 class GatewayConfig(BaseModel):
@@ -88,18 +89,21 @@ class Config(BaseSettings):
         return Path(self.agents.defaults.workspace).expanduser()
 
     def get_api_key(self) -> str | None:
-        """Get API key in priority order: OpenRouter > Anthropic > OpenAI."""
+        """Get API key in priority order: OpenRouter > Anthropic > OpenAI > vLLM."""
         return (
             self.providers.openrouter.api_key or
             self.providers.anthropic.api_key or
             self.providers.openai.api_key or
+            self.providers.vllm.api_key or
             None
         )
 
     def get_api_base(self) -> str | None:
-        """Get API base URL if using OpenRouter."""
+        """Get API base URL if using OpenRouter or vLLM."""
         if self.providers.openrouter.api_key:
             return self.providers.openrouter.api_base or "https://openrouter.ai/api/v1"
+        if self.providers.vllm.api_base:
+            return self.providers.vllm.api_base
         return None
 
     class Config:
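Taken together, the two methods now fall through to the vLLM provider when no hosted key is configured. A hedged usage sketch of that resolution order (the import path is hypothetical; the field names match the diff above):

    from myapp.config import Config  # hypothetical module path

    cfg = Config()
    cfg.providers.vllm.api_key = "token-abc"
    cfg.providers.vllm.api_base = "http://localhost:8000/v1"

    # With no OpenRouter/Anthropic/OpenAI credentials set, resolution
    # falls through to the vLLM entries added in this commit.
    assert cfg.get_api_key() == "token-abc"
    assert cfg.get_api_base() == "http://localhost:8000/v1"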