Merge remote-tracking branch 'origin/main' into pr-1868

.gitignore (vendored)
@@ -20,4 +20,5 @@ __pycache__/
poetry.lock
.pytest_cache/
botpy.log
nano.*.save

README.md
@@ -78,6 +78,25 @@
<img src="nanobot_arch.png" alt="nanobot architecture" width="800">
</p>

## Table of Contents

- [News](#-news)
- [Key Features](#key-features-of-nanobot)
- [Architecture](#️-architecture)
- [Features](#-features)
- [Install](#-install)
- [Quick Start](#-quick-start)
- [Chat Apps](#-chat-apps)
- [Agent Social Network](#-agent-social-network)
- [Configuration](#️-configuration)
- [Multiple Instances](#-multiple-instances)
- [CLI Reference](#-cli-reference)
- [Docker](#-docker)
- [Linux Service](#-linux-service)
- [Project Structure](#-project-structure)
- [Contribute & Roadmap](#-contribute--roadmap)
- [Star History](#-star-history)

## ✨ Features

<table align="center">
@@ -208,6 +227,7 @@ Connect nanobot to your favorite chat platform.
| **Slack** | Bot token + App-Level token |
| **Email** | IMAP/SMTP credentials |
| **QQ** | App ID + App Secret |
| **Wecom** | Bot ID + Bot Secret |

<details>
<summary><b>Telegram</b> (Recommended)</summary>
@@ -677,6 +697,46 @@ nanobot gateway

</details>

<details>
<summary><b>Wecom (企业微信)</b></summary>

> Here we use [wecom-aibot-sdk-python](https://github.com/chengyongru/wecom_aibot_sdk) (a community Python port of the official [@wecom/aibot-node-sdk](https://www.npmjs.com/package/@wecom/aibot-node-sdk)).
>
> Uses a **WebSocket** long connection — no public IP required.

**1. Install the optional dependency**

```bash
pip install nanobot-ai[wecom]
```

**2. Create a WeCom AI Bot**

Go to the WeCom admin console → Intelligent Robot → Create Robot → select **API mode** with **long connection**. Copy the Bot ID and Secret.

**3. Configure**

```json
{
  "channels": {
    "wecom": {
      "enabled": true,
      "botId": "your_bot_id",
      "secret": "your_bot_secret",
      "allowFrom": ["your_id"]
    }
  }
}
```

**4. Run**

```bash
nanobot gateway
```

</details>

## 🌐 Agent Social Network

🐈 nanobot can link to the agent social network (agent community). **Just send one message and your nanobot joins automatically!**
@@ -718,6 +778,7 @@ Config file: `~/.nanobot/config.json`
| `dashscope` | LLM (Qwen) | [dashscope.console.aliyun.com](https://dashscope.console.aliyun.com) |
| `moonshot` | LLM (Moonshot/Kimi) | [platform.moonshot.cn](https://platform.moonshot.cn) |
| `zhipu` | LLM (Zhipu GLM) | [open.bigmodel.cn](https://open.bigmodel.cn) |
| `ollama` | LLM (local, Ollama) | — |
| `vllm` | LLM (local, any OpenAI-compatible server) | — |
| `openai_codex` | LLM (Codex, OAuth) | `nanobot provider login openai-codex` |
| `github_copilot` | LLM (GitHub Copilot, OAuth) | `nanobot provider login github-copilot` |
@@ -783,6 +844,37 @@ Connects directly to any OpenAI-compatible endpoint — LM Studio, llama.cpp, To

</details>

<details>
<summary><b>Ollama (local)</b></summary>

Run a local model with Ollama, then add it to your config:

**1. Start Ollama** (example):
```bash
ollama run llama3.2
```

**2. Add to config** (partial — merge into `~/.nanobot/config.json`):
```json
{
  "providers": {
    "ollama": {
      "apiBase": "http://localhost:11434"
    }
  },
  "agents": {
    "defaults": {
      "provider": "ollama",
      "model": "llama3.2"
    }
  }
}
```

> `provider: "auto"` also works when `providers.ollama.apiBase` is configured, but setting `"provider": "ollama"` is the clearest option.
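
For illustration, the same setup with `"provider": "auto"`; this mirrors `test_config_auto_detects_ollama_from_local_api_base` later in this diff, where nanobot infers `ollama` from the configured `apiBase`:

```json
{
  "providers": {
    "ollama": {
      "apiBase": "http://localhost:11434"
    }
  },
  "agents": {
    "defaults": {
      "provider": "auto",
      "model": "llama3.2"
    }
  }
}
```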

</details>

<details>
<summary><b>vLLM (local / OpenAI-compatible)</b></summary>

@@ -7,7 +7,6 @@ from typing import Any

from loguru import logger

from nanobot.bus.events import OutboundMessage
from nanobot.bus.queue import MessageBus
from nanobot.channels.base import BaseChannel
from nanobot.config.schema import Config
@@ -150,6 +149,18 @@ class ChannelManager:
            except ImportError as e:
                logger.warning("Matrix channel not available: {}", e)

        # WeCom channel
        if self.config.channels.wecom.enabled:
            try:
                from nanobot.channels.wecom import WecomChannel
                self.channels["wecom"] = WecomChannel(
                    self.config.channels.wecom,
                    self.bus,
                )
                logger.info("WeCom channel enabled")
            except ImportError as e:
                logger.warning("WeCom channel not available: {}", e)

        self._validate_allow_from()

    def _validate_allow_from(self) -> None:

nanobot/channels/wecom.py (new file)
@@ -0,0 +1,352 @@
"""WeCom (Enterprise WeChat) channel implementation using wecom_aibot_sdk."""

import asyncio
import importlib.util
import os
from collections import OrderedDict
from typing import Any

from loguru import logger

from nanobot.bus.events import OutboundMessage
from nanobot.bus.queue import MessageBus
from nanobot.channels.base import BaseChannel
from nanobot.config.paths import get_media_dir
from nanobot.config.schema import WecomConfig

WECOM_AVAILABLE = importlib.util.find_spec("wecom_aibot_sdk") is not None

# Message type display mapping
MSG_TYPE_MAP = {
    "image": "[image]",
    "voice": "[voice]",
    "file": "[file]",
    "mixed": "[mixed content]",
}


class WecomChannel(BaseChannel):
    """
    WeCom (Enterprise WeChat) channel using WebSocket long connection.

    Uses WebSocket to receive events - no public IP or webhook required.

    Requires:
    - Bot ID and Secret from WeCom AI Bot platform
    """

    name = "wecom"

    def __init__(self, config: WecomConfig, bus: MessageBus):
        super().__init__(config, bus)
        self.config: WecomConfig = config
        self._client: Any = None
        self._processed_message_ids: OrderedDict[str, None] = OrderedDict()
        self._loop: asyncio.AbstractEventLoop | None = None
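        # Bound in start() to the SDK's generate_req_id helper once wecom_aibot_sdk is imported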
        self._generate_req_id = None
        # Store frame headers for each chat to enable replies
        self._chat_frames: dict[str, Any] = {}

    async def start(self) -> None:
        """Start the WeCom bot with WebSocket long connection."""
        if not WECOM_AVAILABLE:
            logger.error("WeCom SDK not installed. Run: pip install nanobot-ai[wecom]")
            return

        if not self.config.bot_id or not self.config.secret:
            logger.error("WeCom bot_id and secret not configured")
            return

        from wecom_aibot_sdk import WSClient, generate_req_id

        self._running = True
        self._loop = asyncio.get_running_loop()
        self._generate_req_id = generate_req_id

        # Create WebSocket client
        self._client = WSClient({
            "bot_id": self.config.bot_id,
            "secret": self.config.secret,
            "reconnect_interval": 1000,
            "max_reconnect_attempts": -1,  # Infinite reconnect
            "heartbeat_interval": 30000,
        })

        # Register event handlers
        self._client.on("connected", self._on_connected)
        self._client.on("authenticated", self._on_authenticated)
        self._client.on("disconnected", self._on_disconnected)
        self._client.on("error", self._on_error)
        self._client.on("message.text", self._on_text_message)
        self._client.on("message.image", self._on_image_message)
        self._client.on("message.voice", self._on_voice_message)
        self._client.on("message.file", self._on_file_message)
        self._client.on("message.mixed", self._on_mixed_message)
        self._client.on("event.enter_chat", self._on_enter_chat)

        logger.info("WeCom bot starting with WebSocket long connection")
        logger.info("No public IP required - using WebSocket to receive events")

        # Connect
        await self._client.connect_async()

        # Keep running until stopped
        while self._running:
            await asyncio.sleep(1)

    async def stop(self) -> None:
        """Stop the WeCom bot."""
        self._running = False
        if self._client:
            await self._client.disconnect()
        logger.info("WeCom bot stopped")

    async def _on_connected(self, frame: Any) -> None:
        """Handle WebSocket connected event."""
        logger.info("WeCom WebSocket connected")

    async def _on_authenticated(self, frame: Any) -> None:
        """Handle authentication success event."""
        logger.info("WeCom authenticated successfully")

    async def _on_disconnected(self, frame: Any) -> None:
        """Handle WebSocket disconnected event."""
        reason = frame.body if hasattr(frame, 'body') else str(frame)
        logger.warning("WeCom WebSocket disconnected: {}", reason)

    async def _on_error(self, frame: Any) -> None:
        """Handle error event."""
        logger.error("WeCom error: {}", frame)

    async def _on_text_message(self, frame: Any) -> None:
        """Handle text message."""
        await self._process_message(frame, "text")

    async def _on_image_message(self, frame: Any) -> None:
        """Handle image message."""
        await self._process_message(frame, "image")

    async def _on_voice_message(self, frame: Any) -> None:
        """Handle voice message."""
        await self._process_message(frame, "voice")

    async def _on_file_message(self, frame: Any) -> None:
        """Handle file message."""
        await self._process_message(frame, "file")

    async def _on_mixed_message(self, frame: Any) -> None:
        """Handle mixed content message."""
        await self._process_message(frame, "mixed")

    async def _on_enter_chat(self, frame: Any) -> None:
        """Handle enter_chat event (user opens chat with bot)."""
        try:
            # Extract body from WsFrame dataclass or dict
            if hasattr(frame, 'body'):
                body = frame.body or {}
            elif isinstance(frame, dict):
                body = frame.get("body", frame)
            else:
                body = {}

            chat_id = body.get("chatid", "") if isinstance(body, dict) else ""

            if chat_id and self.config.welcome_message:
                await self._client.reply_welcome(frame, {
                    "msgtype": "text",
                    "text": {"content": self.config.welcome_message},
                })
        except Exception as e:
            logger.error("Error handling enter_chat: {}", e)

    async def _process_message(self, frame: Any, msg_type: str) -> None:
        """Process incoming message and forward to bus."""
        try:
            # Extract body from WsFrame dataclass or dict
            if hasattr(frame, 'body'):
                body = frame.body or {}
            elif isinstance(frame, dict):
                body = frame.get("body", frame)
            else:
                body = {}

            # Ensure body is a dict
            if not isinstance(body, dict):
                logger.warning("Invalid body type: {}", type(body))
                return

            # Extract message info
            msg_id = body.get("msgid", "")
            if not msg_id:
                msg_id = f"{body.get('chatid', '')}_{body.get('sendertime', '')}"

            # Deduplication check
            if msg_id in self._processed_message_ids:
                return
            self._processed_message_ids[msg_id] = None

            # Trim cache
            while len(self._processed_message_ids) > 1000:
                self._processed_message_ids.popitem(last=False)
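            # OrderedDict preserves insertion order, so popitem(last=False) evicts the
            # oldest seen message id first (a FIFO cache capped at 1000 entries)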

            # Extract sender info from "from" field (SDK format)
            from_info = body.get("from", {})
            sender_id = from_info.get("userid", "unknown") if isinstance(from_info, dict) else "unknown"

            # For single chat, chatid is the sender's userid
            # For group chat, chatid is provided in body
            chat_type = body.get("chattype", "single")
            chat_id = body.get("chatid", sender_id)

            content_parts = []

            if msg_type == "text":
                text = body.get("text", {}).get("content", "")
                if text:
                    content_parts.append(text)

            elif msg_type == "image":
                image_info = body.get("image", {})
                file_url = image_info.get("url", "")
                aes_key = image_info.get("aeskey", "")

                if file_url and aes_key:
                    file_path = await self._download_and_save_media(file_url, aes_key, "image")
                    if file_path:
                        filename = os.path.basename(file_path)
                        content_parts.append(f"[image: {filename}]\n[Image: source: {file_path}]")
                    else:
                        content_parts.append("[image: download failed]")
                else:
                    content_parts.append("[image: download failed]")

            elif msg_type == "voice":
                voice_info = body.get("voice", {})
                # Voice message already contains transcribed content from WeCom
                voice_content = voice_info.get("content", "")
                if voice_content:
                    content_parts.append(f"[voice] {voice_content}")
                else:
                    content_parts.append("[voice]")

            elif msg_type == "file":
                file_info = body.get("file", {})
                file_url = file_info.get("url", "")
                aes_key = file_info.get("aeskey", "")
                file_name = file_info.get("name", "unknown")

                if file_url and aes_key:
                    file_path = await self._download_and_save_media(file_url, aes_key, "file", file_name)
                    if file_path:
                        content_parts.append(f"[file: {file_name}]\n[File: source: {file_path}]")
                    else:
                        content_parts.append(f"[file: {file_name}: download failed]")
                else:
                    content_parts.append(f"[file: {file_name}: download failed]")

            elif msg_type == "mixed":
                # Mixed content contains multiple message items
                msg_items = body.get("mixed", {}).get("item", [])
                for item in msg_items:
                    item_type = item.get("type", "")
                    if item_type == "text":
                        text = item.get("text", {}).get("content", "")
                        if text:
                            content_parts.append(text)
                    else:
                        content_parts.append(MSG_TYPE_MAP.get(item_type, f"[{item_type}]"))

            else:
                content_parts.append(MSG_TYPE_MAP.get(msg_type, f"[{msg_type}]"))

            content = "\n".join(content_parts) if content_parts else ""

            if not content:
                return

            # Store frame for this chat to enable replies
            self._chat_frames[chat_id] = frame

            # Forward to message bus
            # Note: media paths are included in content for broader model compatibility
            await self._handle_message(
                sender_id=sender_id,
                chat_id=chat_id,
                content=content,
                media=None,
                metadata={
                    "message_id": msg_id,
                    "msg_type": msg_type,
                    "chat_type": chat_type,
                },
            )

        except Exception as e:
            logger.error("Error processing WeCom message: {}", e)

    async def _download_and_save_media(
        self,
        file_url: str,
        aes_key: str,
        media_type: str,
        filename: str | None = None,
    ) -> str | None:
        """
        Download and decrypt media from WeCom.

        Returns:
            file_path or None if download failed
        """
        try:
            data, fname = await self._client.download_file(file_url, aes_key)

            if not data:
                logger.warning("Failed to download media from WeCom")
                return None

            media_dir = get_media_dir("wecom")
            if not filename:
                filename = fname or f"{media_type}_{hash(file_url) % 100000}"
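            # basename() strips any directory components so the saved file stays inside media_dir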
            filename = os.path.basename(filename)

            file_path = media_dir / filename
            file_path.write_bytes(data)
            logger.debug("Downloaded {} to {}", media_type, file_path)
            return str(file_path)

        except Exception as e:
            logger.error("Error downloading media: {}", e)
            return None

    async def send(self, msg: OutboundMessage) -> None:
        """Send a message through WeCom."""
        if not self._client:
            logger.warning("WeCom client not initialized")
            return

        try:
            content = msg.content.strip()
            if not content:
                return

            # Get the stored frame for this chat
            frame = self._chat_frames.get(msg.chat_id)
            if not frame:
                logger.warning("No frame found for chat {}, cannot reply", msg.chat_id)
                return

            # Use streaming reply for better UX
            stream_id = self._generate_req_id("stream")

            # Send as streaming message with finish=True
            await self._client.reply_stream(
                frame,
                stream_id,
                content,
                finish=True,
            )

            logger.debug("WeCom message sent to {}", msg.chat_id)

        except Exception as e:
            logger.error("Error sending WeCom message: {}", e)
@@ -252,7 +252,7 @@ def _make_provider(config: Config):
    from nanobot.providers.litellm_provider import LiteLLMProvider
    from nanobot.providers.registry import find_by_name
    spec = find_by_name(provider_name)
-   if not model.startswith("bedrock/") and not (p and p.api_key) and not (spec and spec.is_oauth):
+   if not model.startswith("bedrock/") and not (p and p.api_key) and not (spec and (spec.is_oauth or spec.is_local)):
        console.print("[red]Error: No API key configured.[/red]")
        console.print("Set one in ~/.nanobot/config.json under providers section")
        raise typer.Exit(1)
@@ -200,6 +200,14 @@ class QQConfig(Base):
    )  # Allowed user openids (empty = public access)


class WecomConfig(Base):
    """WeCom (Enterprise WeChat) AI Bot channel configuration."""

    enabled: bool = False
    bot_id: str = ""  # Bot ID from WeCom AI Bot platform
    secret: str = ""  # Bot Secret from WeCom AI Bot platform
    allow_from: list[str] = Field(default_factory=list)  # Allowed user IDs
    welcome_message: str = ""  # Welcome message for enter_chat event


class ChannelsConfig(Base):
@@ -217,6 +225,7 @@ class ChannelsConfig(Base):
    slack: SlackConfig = Field(default_factory=SlackConfig)
    qq: QQConfig = Field(default_factory=QQConfig)
    matrix: MatrixConfig = Field(default_factory=MatrixConfig)
    wecom: WecomConfig = Field(default_factory=WecomConfig)


class AgentDefaults(Base):
@@ -272,6 +281,7 @@ class ProvidersConfig(Base):
    moonshot: ProviderConfig = Field(default_factory=ProviderConfig)
    minimax: ProviderConfig = Field(default_factory=ProviderConfig)
    aihubmix: ProviderConfig = Field(default_factory=ProviderConfig)  # AiHubMix API gateway
    ollama: ProviderConfig = Field(default_factory=ProviderConfig)  # Ollama local models
    siliconflow: ProviderConfig = Field(default_factory=ProviderConfig)  # SiliconFlow (硅基流动)
    volcengine: ProviderConfig = Field(default_factory=ProviderConfig)  # VolcEngine (火山引擎)
    openai_codex: ProviderConfig = Field(default_factory=ProviderConfig)  # OpenAI Codex (OAuth)
@@ -375,16 +385,25 @@ class Config(BaseSettings):
        for spec in PROVIDERS:
            p = getattr(self.providers, spec.name, None)
            if p and model_prefix and normalized_prefix == spec.name:
-               if spec.is_oauth or p.api_key:
+               if spec.is_oauth or spec.is_local or p.api_key:
                    return p, spec.name

        # Match by keyword (order follows PROVIDERS registry)
        for spec in PROVIDERS:
            p = getattr(self.providers, spec.name, None)
            if p and any(_kw_matches(kw) for kw in spec.keywords):
-               if spec.is_oauth or p.api_key:
+               if spec.is_oauth or spec.is_local or p.api_key:
                    return p, spec.name

        # Fallback: configured local providers can route models without
        # provider-specific keywords (for example plain "llama3.2" on Ollama).
        for spec in PROVIDERS:
            if not spec.is_local:
                continue
            p = getattr(self.providers, spec.name, None)
            if p and p.api_base:
                return p, spec.name

        # Fallback: gateways first, then others (follows registry order)
        # OAuth providers are NOT valid fallbacks — they require explicit model selection
        for spec in PROVIDERS:
@@ -411,7 +430,7 @@ class Config(BaseSettings):
        return p.api_key if p else None

    def get_api_base(self, model: str | None = None) -> str | None:
-       """Get API base URL for the given model. Applies default URLs for known gateways."""
+       """Get API base URL for the given model. Applies default URLs for gateway/local providers."""
        from nanobot.providers.registry import find_by_name

        p, name = self._match_provider(model)
@@ -422,7 +441,7 @@ class Config(BaseSettings):
        # to avoid polluting the global litellm.api_base.
        if name:
            spec = find_by_name(name)
-           if spec and spec.is_gateway and spec.default_api_base:
+           if spec and (spec.is_gateway or spec.is_local) and spec.default_api_base:
                return spec.default_api_base
        return None

@@ -360,6 +360,23 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
        strip_model_prefix=False,
        model_overrides=(),
    ),
    # === Ollama (local, OpenAI-compatible) ===================================
    ProviderSpec(
        name="ollama",
        keywords=("ollama", "nemotron"),
        env_key="OLLAMA_API_KEY",
        display_name="Ollama",
        litellm_prefix="ollama_chat",  # model → ollama_chat/model
        skip_prefixes=("ollama/", "ollama_chat/"),
        env_extras=(),
        is_gateway=False,
        is_local=True,
        detect_by_key_prefix="",
        detect_by_base_keyword="11434",
        default_api_base="http://localhost:11434",
        strip_model_prefix=False,
        model_overrides=(),
    ),
    # === Auxiliary (not a primary LLM provider) ============================
    # Groq: mainly used for Whisper voice transcription, also usable for LLM.
    # Needs "groq/" prefix for LiteLLM routing. Placed last — it rarely wins fallback.
@@ -48,6 +48,9 @@ dependencies = [
]

[project.optional-dependencies]
wecom = [
    "wecom-aibot-sdk-python @ git+https://github.com/chengyongru/wecom_aibot_sdk.git@v0.1.2",
]
matrix = [
    "matrix-nio[e2e]>=0.25.2",
    "mistune>=3.0.0,<4.0.0",
@@ -114,6 +114,35 @@ def test_config_matches_openai_codex_with_hyphen_prefix():
    assert config.get_provider_name() == "openai_codex"


def test_config_matches_explicit_ollama_prefix_without_api_key():
    config = Config()
    config.agents.defaults.model = "ollama/llama3.2"

    assert config.get_provider_name() == "ollama"
    assert config.get_api_base() == "http://localhost:11434"


def test_config_explicit_ollama_provider_uses_default_localhost_api_base():
    config = Config()
    config.agents.defaults.provider = "ollama"
    config.agents.defaults.model = "llama3.2"

    assert config.get_provider_name() == "ollama"
    assert config.get_api_base() == "http://localhost:11434"


def test_config_auto_detects_ollama_from_local_api_base():
    config = Config.model_validate(
        {
            "agents": {"defaults": {"provider": "auto", "model": "llama3.2"}},
            "providers": {"ollama": {"apiBase": "http://localhost:11434"}},
        }
    )

    assert config.get_provider_name() == "ollama"
    assert config.get_api_base() == "http://localhost:11434"


def test_find_by_model_prefers_explicit_prefix_over_generic_codex_keyword():
    spec = find_by_model("github-copilot/gpt-5.3-codex")