Merge remote-tracking branch 'origin/main'
@@ -112,15 +112,17 @@ class MemoryStore:
 ## Conversation to Process
 
 {self._format_messages(messages)}"""
 
+        chat_messages = [
+            {"role": "system", "content": "You are a memory consolidation agent. Call the save_memory tool with your consolidation of the conversation."},
+            {"role": "user", "content": prompt},
+        ]
+
         try:
             response = await provider.chat_with_retry(
-                messages=[
-                    {"role": "system", "content": "You are a memory consolidation agent. Call the save_memory tool with your consolidation of the conversation."},
-                    {"role": "user", "content": prompt},
-                ],
+                messages=chat_messages,
                 tools=_SAVE_MEMORY_TOOL,
                 model=model,
-                tool_choice="required",
+                tool_choice={"type": "function", "function": {"name": "save_memory"}},
             )
             if not response.has_tool_calls:
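
The tool_choice switch above pins the model to one named function: the string "required" only forces *some* tool call, while the object form forces save_memory specifically. A minimal sketch of the OpenAI-compatible request body this produces; the tool schema and model name below are illustrative assumptions, not nanobot's actual _SAVE_MEMORY_TOOL:

# Sketch of a forced-function request body (OpenAI-compatible schema).
# The parameters schema here is an assumption for illustration only.
save_memory_tool = [{
    "type": "function",
    "function": {
        "name": "save_memory",
        "description": "Persist a consolidated summary of the conversation.",
        "parameters": {
            "type": "object",
            "properties": {"summary": {"type": "string"}},
            "required": ["summary"],
        },
    },
}]

payload = {
    "model": "gpt-4o-mini",  # illustrative model name
    "messages": [{"role": "user", "content": "Consolidate this chat."}],
    "tools": save_memory_tool,
    # "required" = the model must call *some* tool; naming the function
    # forces exactly save_memory, which is what the diff switches to.
    "tool_choice": {"type": "function", "function": {"name": "save_memory"}},
}
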
@@ -19,10 +19,12 @@ if sys.platform == "win32":
     pass
 
 import typer
+from prompt_toolkit import print_formatted_text
 from prompt_toolkit import PromptSession
-from prompt_toolkit.formatted_text import HTML
+from prompt_toolkit.formatted_text import ANSI, HTML
 from prompt_toolkit.history import FileHistory
 from prompt_toolkit.patch_stdout import patch_stdout
+from prompt_toolkit.application import run_in_terminal
 from rich.console import Console
 from rich.markdown import Markdown
 from rich.table import Table
@@ -111,8 +113,25 @@ def _init_prompt_session() -> None:
     )
 
 
+def _make_console() -> Console:
+    return Console(file=sys.stdout)
+
+
+def _render_interactive_ansi(render_fn) -> str:
+    """Render Rich output to ANSI so prompt_toolkit can print it safely."""
+    ansi_console = Console(
+        force_terminal=True,
+        color_system=console.color_system or "standard",
+        width=console.width,
+    )
+    with ansi_console.capture() as capture:
+        render_fn(ansi_console)
+    return capture.get()
+
+
 def _print_agent_response(response: str, render_markdown: bool) -> None:
     """Render assistant response with consistent terminal styling."""
+    console = _make_console()
     content = response or ""
     body = Markdown(content) if render_markdown else Text(content)
     console.print()
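
The capture-to-ANSI round trip in _render_interactive_ansi exists because Rich output written straight to stdout would fight with prompt_toolkit's renderer. A standalone sketch of the same trick, assuming rich and prompt_toolkit are installed; the width, color system, and text are illustrative:

from prompt_toolkit import print_formatted_text
from prompt_toolkit.formatted_text import ANSI
from rich.console import Console

# force_terminal=True keeps ANSI escape codes in the captured output even
# though the capture target is not a real TTY.
ansi_console = Console(force_terminal=True, color_system="standard", width=80)
with ansi_console.capture() as capture:
    ansi_console.print("[bold cyan]hello[/bold cyan]")
# prompt_toolkit re-parses the escapes and emits them through its own
# output layer instead of printing raw Rich markup.
print_formatted_text(ANSI(capture.get()), end="")
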
@@ -121,6 +140,34 @@ def _print_agent_response(response: str, render_markdown: bool) -> None:
     console.print()
 
 
+async def _print_interactive_line(text: str) -> None:
+    """Print async interactive updates with prompt_toolkit-safe Rich styling."""
+    def _write() -> None:
+        ansi = _render_interactive_ansi(
+            lambda c: c.print(f" [dim]↳ {text}[/dim]")
+        )
+        print_formatted_text(ANSI(ansi), end="")
+
+    await run_in_terminal(_write)
+
+
+async def _print_interactive_response(response: str, render_markdown: bool) -> None:
+    """Print async interactive replies with prompt_toolkit-safe Rich styling."""
+    def _write() -> None:
+        content = response or ""
+        ansi = _render_interactive_ansi(
+            lambda c: (
+                c.print(),
+                c.print(f"[cyan]{__logo__} nanobot[/cyan]"),
+                c.print(Markdown(content) if render_markdown else Text(content)),
+                c.print(),
+            )
+        )
+        print_formatted_text(ANSI(ansi), end="")
+
+    await run_in_terminal(_write)
+
+
 def _is_exit_command(command: str) -> bool:
     """Return True when input should end interactive chat."""
     return command.lower() in EXIT_COMMANDS
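
Both helpers funnel their writes through run_in_terminal because printing to stdout while a PromptSession is waiting for input corrupts the prompt line; run_in_terminal suspends the prompt, runs the callback, then redraws. A minimal sketch of the pattern, with an illustrative coroutine name:

import asyncio

from prompt_toolkit import print_formatted_text
from prompt_toolkit.application import run_in_terminal

async def notify(text: str) -> None:
    # The callback runs with the prompt temporarily cleared, so the write
    # cannot interleave with the user's in-progress input line.
    await run_in_terminal(lambda: print_formatted_text(text))

# With no prompt application running, the callback simply executes:
asyncio.run(notify("background update"))
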
@@ -616,14 +663,15 @@ def agent(
                    elif ch and not is_tool_hint and not ch.send_progress:
                        pass
                    else:
-                        console.print(f" [dim]↳ {msg.content}[/dim]")
+                        await _print_interactive_line(msg.content)
+
                elif not turn_done.is_set():
                    if msg.content:
                        turn_response.append(msg.content)
                    turn_done.set()
                elif msg.content:
-                    console.print()
-                    _print_agent_response(msg.content, render_markdown=markdown)
+                    await _print_interactive_response(msg.content, render_markdown=markdown)
+
            except asyncio.TimeoutError:
                continue
            except asyncio.CancelledError:
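
For orientation: these lines sit in a consume loop that polls the agent's message stream with a short timeout so that cancellation and end-of-turn can be observed between messages. A stripped-down sketch of that shape, assuming an asyncio.Queue feeds it; the names are illustrative, not nanobot's:

import asyncio

async def consume(queue: asyncio.Queue) -> None:
    while True:
        try:
            msg = await asyncio.wait_for(queue.get(), timeout=0.1)
        except asyncio.TimeoutError:
            continue   # nothing yet; loop so cancellation can be observed
        except asyncio.CancelledError:
            break      # interactive session is shutting down
        print(msg)     # the real loop dispatches to the printers above
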
@@ -276,15 +276,18 @@ class ProvidersConfig(Base):
     deepseek: ProviderConfig = Field(default_factory=ProviderConfig)
     groq: ProviderConfig = Field(default_factory=ProviderConfig)
     zhipu: ProviderConfig = Field(default_factory=ProviderConfig)
-    dashscope: ProviderConfig = Field(default_factory=ProviderConfig)  # Alibaba Cloud Tongyi Qianwen (Qwen)
+    dashscope: ProviderConfig = Field(default_factory=ProviderConfig)
     vllm: ProviderConfig = Field(default_factory=ProviderConfig)
-    ollama: ProviderConfig = Field(default_factory=ProviderConfig)  # Ollama local models
     gemini: ProviderConfig = Field(default_factory=ProviderConfig)
     moonshot: ProviderConfig = Field(default_factory=ProviderConfig)
     minimax: ProviderConfig = Field(default_factory=ProviderConfig)
     aihubmix: ProviderConfig = Field(default_factory=ProviderConfig)  # AiHubMix API gateway
+    ollama: ProviderConfig = Field(default_factory=ProviderConfig)  # Ollama local models
     siliconflow: ProviderConfig = Field(default_factory=ProviderConfig)  # SiliconFlow (硅基流动)
     volcengine: ProviderConfig = Field(default_factory=ProviderConfig)  # VolcEngine (火山引擎)
+    volcengine_coding_plan: ProviderConfig = Field(default_factory=ProviderConfig)  # VolcEngine Coding Plan
+    byteplus: ProviderConfig = Field(default_factory=ProviderConfig)  # BytePlus (VolcEngine international)
+    byteplus_coding_plan: ProviderConfig = Field(default_factory=ProviderConfig)  # BytePlus Coding Plan
     openai_codex: ProviderConfig = Field(default_factory=ProviderConfig)  # OpenAI Codex (OAuth)
     github_copilot: ProviderConfig = Field(default_factory=ProviderConfig)  # Github Copilot (OAuth)
 
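
Each field uses Field(default_factory=ProviderConfig) so that every ProvidersConfig instance gets its own fresh, mutable ProviderConfig rather than one shared default object. A small sketch of why that matters; the api_key field is an assumption for illustration, while api_base appears in the fallback logic below:

from pydantic import BaseModel, Field

class ProviderConfig(BaseModel):
    api_key: str = ""   # assumed field, for illustration only
    api_base: str = ""

class ProvidersConfig(BaseModel):
    ollama: ProviderConfig = Field(default_factory=ProviderConfig)

a = ProvidersConfig()
b = ProvidersConfig()
a.ollama.api_base = "http://localhost:11434"
assert b.ollama.api_base == ""  # defaults are independent per instance
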
@@ -400,12 +403,21 @@ class Config(BaseSettings):
 
        # Fallback: configured local providers can route models without
        # provider-specific keywords (for example plain "llama3.2" on Ollama).
+        # Prefer providers whose detect_by_base_keyword matches the configured api_base
+        # (e.g. Ollama's "11434" in "http://localhost:11434") over plain registry order.
+        local_fallback: tuple[ProviderConfig, str] | None = None
        for spec in PROVIDERS:
            if not spec.is_local:
                continue
            p = getattr(self.providers, spec.name, None)
-            if p and p.api_base:
+            if not (p and p.api_base):
+                continue
+            if spec.detect_by_base_keyword and spec.detect_by_base_keyword in p.api_base:
                return p, spec.name
+            if local_fallback is None:
+                local_fallback = (p, spec.name)
+        if local_fallback:
+            return local_fallback
 
        # Fallback: gateways first, then others (follows registry order)
        # OAuth providers are NOT valid fallbacks — they require explicit model selection
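
The loop above prefers a keyword match on api_base over registry order: vllm may come first in PROVIDERS, but an Ollama base URL containing "11434" wins. A standalone sketch of just that selection rule, with a minimal spec shape standing in for nanobot's richer ProviderSpec:

from dataclasses import dataclass

@dataclass
class Spec:
    name: str
    is_local: bool
    detect_by_base_keyword: str

SPECS = (Spec("vllm", True, ""), Spec("ollama", True, "11434"))

def pick_local(api_bases: dict) -> str | None:
    fallback = None
    for spec in SPECS:
        base = api_bases.get(spec.name, "")
        if not base:
            continue
        if spec.detect_by_base_keyword and spec.detect_by_base_keyword in base:
            return spec.name          # keyword match beats registry order
        if fallback is None:
            fallback = spec.name      # first configured local provider
    return fallback

# vllm is registered first, but the port keyword routes to ollama:
assert pick_local({"vllm": "http://gpu-box:8000/v1",
                   "ollama": "http://localhost:11434"}) == "ollama"
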
@@ -145,7 +145,8 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
        strip_model_prefix=False,
        model_overrides=(),
    ),
-    # VolcEngine (火山引擎): OpenAI-compatible gateway
+
+    # VolcEngine (火山引擎): OpenAI-compatible gateway, pay-per-use models
    ProviderSpec(
        name="volcengine",
        keywords=("volcengine", "volces", "ark"),
@@ -162,6 +163,62 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
        strip_model_prefix=False,
        model_overrides=(),
    ),
+
+    # VolcEngine Coding Plan (火山引擎 Coding Plan): same key as volcengine
+    ProviderSpec(
+        name="volcengine_coding_plan",
+        keywords=("volcengine-plan",),
+        env_key="OPENAI_API_KEY",
+        display_name="VolcEngine Coding Plan",
+        litellm_prefix="volcengine",
+        skip_prefixes=(),
+        env_extras=(),
+        is_gateway=True,
+        is_local=False,
+        detect_by_key_prefix="",
+        detect_by_base_keyword="",
+        default_api_base="https://ark.cn-beijing.volces.com/api/coding/v3",
+        strip_model_prefix=True,
+        model_overrides=(),
+    ),
+
+    # BytePlus: VolcEngine international, pay-per-use models
+    ProviderSpec(
+        name="byteplus",
+        keywords=("byteplus",),
+        env_key="OPENAI_API_KEY",
+        display_name="BytePlus",
+        litellm_prefix="volcengine",
+        skip_prefixes=(),
+        env_extras=(),
+        is_gateway=True,
+        is_local=False,
+        detect_by_key_prefix="",
+        detect_by_base_keyword="bytepluses",
+        default_api_base="https://ark.ap-southeast.bytepluses.com/api/v3",
+        strip_model_prefix=True,
+        model_overrides=(),
+    ),
+
+    # BytePlus Coding Plan: same key as byteplus
+    ProviderSpec(
+        name="byteplus_coding_plan",
+        keywords=("byteplus-plan",),
+        env_key="OPENAI_API_KEY",
+        display_name="BytePlus Coding Plan",
+        litellm_prefix="volcengine",
+        skip_prefixes=(),
+        env_extras=(),
+        is_gateway=True,
+        is_local=False,
+        detect_by_key_prefix="",
+        detect_by_base_keyword="",
+        default_api_base="https://ark.ap-southeast.bytepluses.com/api/coding/v3",
+        strip_model_prefix=True,
+        model_overrides=(),
+    ),
+
+
    # === Standard providers (matched by model-name keywords) ===============
    # Anthropic: LiteLLM recognizes "claude-*" natively, no prefix needed.
    ProviderSpec(