From af71ccf0514721777aaae500f05016d844fe5fc6 Mon Sep 17 00:00:00 2001 From: Re-bin Date: Sat, 21 Feb 2026 13:05:14 +0000 Subject: [PATCH 1/5] release: v0.1.4.post1 --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 64a884d..c337d02 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "nanobot-ai" -version = "0.1.4" +version = "0.1.4.post1" description = "A lightweight personal AI assistant framework" requires-python = ">=3.11" license = {text = "MIT"} From 88ca2e05307b66dfebe469d884c17217ea3b17d6 Mon Sep 17 00:00:00 2001 From: Re-bin Date: Sat, 21 Feb 2026 13:20:55 +0000 Subject: [PATCH 2/5] docs: update v0.1.4.post1 release news --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 8e1202c..1002872 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,7 @@ ## πŸ“’ News +- **2026-02-21** πŸŽ‰ Released **v0.1.4.post1** β€” new providers, media support across channels, and major stability improvements. See [release notes](https://github.com/HKUDS/nanobot/releases/tag/v0.1.4.post1) for details. - **2026-02-20** 🐦 Feishu now receives multimodal files from users. More reliable memory under the hood. - **2026-02-19** ✨ Slack now sends files, Discord splits long messages, and subagents work in CLI mode. - **2026-02-18** ⚑️ nanobot now supports VolcEngine, MCP custom auth headers, and Anthropic prompt caching. From 01c835aac2bfc676d6213261c6ad6cb7301bb83e Mon Sep 17 00:00:00 2001 From: nanobot-bot Date: Sat, 21 Feb 2026 23:07:18 +0800 Subject: [PATCH 3/5] fix(context): Fix 'Missing `reasoning_content` field' error for deepseek provider. 
--- nanobot/agent/context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nanobot/agent/context.py b/nanobot/agent/context.py index 876d43d..b3de8da 100644 --- a/nanobot/agent/context.py +++ b/nanobot/agent/context.py @@ -235,7 +235,7 @@ To recall past events, grep {workspace_path}/memory/HISTORY.md""" msg["tool_calls"] = tool_calls # Include reasoning content when provided (required by some thinking models) - if reasoning_content: + if reasoning_content is not None: msg["reasoning_content"] = reasoning_content messages.append(msg) From edc671a8a30fd05c46a15b0da7292fc9c4c4b5be Mon Sep 17 00:00:00 2001 From: Re-bin Date: Sat, 21 Feb 2026 16:39:26 +0000 Subject: [PATCH 4/5] docs: update format of news section --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 1002872..cb751ba 100644 --- a/README.md +++ b/README.md @@ -34,6 +34,7 @@
Earlier news + - **2026-02-10** πŸŽ‰ Released **v0.1.3.post6** with improvements! Check the updates [notes](https://github.com/HKUDS/nanobot/releases/tag/v0.1.3.post6) and our [roadmap](https://github.com/HKUDS/nanobot/discussions/431). - **2026-02-09** πŸ’¬ Added Slack, Email, and QQ support β€” nanobot now supports multiple chat platforms! - **2026-02-08** πŸ”§ Refactored Providersβ€”adding a new LLM provider now takes just 2 simple steps! Check [here](#providers). @@ -43,6 +44,7 @@ - **2026-02-04** πŸš€ Released **v0.1.3.post4** with multi-provider & Docker support! Check [here](https://github.com/HKUDS/nanobot/releases/tag/v0.1.3.post4) for details. - **2026-02-03** ⚑ Integrated vLLM for local LLM support and improved natural language task scheduling! - **2026-02-02** πŸŽ‰ nanobot officially launched! Welcome to try 🐈 nanobot! +
## Key Features of nanobot: From deae84482d786f5cb99ed526bf84edeba17caba8 Mon Sep 17 00:00:00 2001 From: init-new-world Date: Sun, 22 Feb 2026 00:42:41 +0800 Subject: [PATCH 5/5] fix: change VolcEngine litellm prefix from openai to volcengine --- nanobot/providers/registry.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nanobot/providers/registry.py b/nanobot/providers/registry.py index ecf092f..2766929 100644 --- a/nanobot/providers/registry.py +++ b/nanobot/providers/registry.py @@ -147,7 +147,7 @@ PROVIDERS: tuple[ProviderSpec, ...] = ( keywords=("volcengine", "volces", "ark"), env_key="OPENAI_API_KEY", display_name="VolcEngine", - litellm_prefix="openai", + litellm_prefix="volcengine", skip_prefixes=(), env_extras=(), is_gateway=True,