Merge branch 'main' into pr-903

This commit is contained in:
Re-bin
2026-02-20 15:21:11 +00:00
3 changed files with 20 additions and 6 deletions


@@ -84,11 +84,24 @@ class SlackChannel(BaseChannel):
             channel_type = slack_meta.get("channel_type")
             # Only reply in thread for channel/group messages; DMs don't use threads
             use_thread = thread_ts and channel_type != "im"
-            await self._web_client.chat_postMessage(
-                channel=msg.chat_id,
-                text=self._to_mrkdwn(msg.content),
-                thread_ts=thread_ts if use_thread else None,
-            )
+            thread_ts_param = thread_ts if use_thread else None
+            if msg.content:
+                await self._web_client.chat_postMessage(
+                    channel=msg.chat_id,
+                    text=self._to_mrkdwn(msg.content),
+                    thread_ts=thread_ts_param,
+                )
+            for media_path in msg.media or []:
+                try:
+                    await self._web_client.files_upload_v2(
+                        channel=msg.chat_id,
+                        file=media_path,
+                        thread_ts=thread_ts_param,
+                    )
+                except Exception as e:
+                    logger.error("Failed to upload file {}: {}", media_path, e)
         except Exception as e:
             logger.error("Error sending Slack message: {}", e)
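For illustration, a minimal, self-contained sketch of the new send flow, assuming slack_sdk's AsyncWebClient and loguru's logger; the Message shape (chat_id, content, media) is a stand-in for the project's real message type. The effect of the change: a media-only message (empty content plus file paths) now yields files_upload_v2 calls in the thread instead of a single empty chat.postMessage.

from dataclasses import dataclass, field

from loguru import logger
from slack_sdk.web.async_client import AsyncWebClient


@dataclass
class Message:
    # Simplified stand-in for the project's message type (an assumption).
    chat_id: str
    content: str = ""
    media: list[str] = field(default_factory=list)


async def send_to_slack(client: AsyncWebClient, msg: Message, thread_ts: str | None) -> None:
    try:
        # Post the text body only when there is one, so media-only messages
        # no longer trigger an empty chat.postMessage call.
        if msg.content:
            await client.chat_postMessage(
                channel=msg.chat_id,
                text=msg.content,
                thread_ts=thread_ts,
            )
        # Upload each attachment into the same thread; one failed upload
        # does not abort the remaining files.
        for media_path in msg.media or []:
            try:
                await client.files_upload_v2(
                    channel=msg.chat_id,
                    file=media_path,
                    thread_ts=thread_ts,
                )
            except Exception as e:
                logger.error("Failed to upload file {}: {}", media_path, e)
    except Exception as e:
        logger.error("Error sending Slack message: {}", e)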


@@ -111,7 +111,7 @@ class LiteLLMProvider(LLMProvider):
     def _supports_cache_control(self, model: str) -> bool:
         """Return True when the provider supports cache_control on content blocks."""
         if self._gateway is not None:
-            return False
+            return self._gateway.supports_prompt_caching
         spec = find_by_model(model)
         return spec is not None and spec.supports_prompt_caching
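For context, a hedged sketch of the corrected capability check; GatewaySpec, ProviderSpec, and find_by_model below are placeholders for the repository's real types. The fix replaces the hard-coded False in the gateway branch with the gateway's own supports_prompt_caching flag, so prompt caching can be enabled when requests are routed through a gateway.

from dataclasses import dataclass


@dataclass
class GatewaySpec:
    # Placeholder: the real gateway object carries more configuration.
    supports_prompt_caching: bool = False


@dataclass
class ProviderSpec:
    # Placeholder: only the field consulted by the check is shown.
    supports_prompt_caching: bool = False


def find_by_model(model: str) -> ProviderSpec | None:
    """Stand-in for the repository's provider lookup by model name."""
    return None


class LiteLLMProvider:
    def __init__(self, gateway: GatewaySpec | None = None) -> None:
        self._gateway = gateway

    def _supports_cache_control(self, model: str) -> bool:
        """Return True when the provider supports cache_control on content blocks."""
        if self._gateway is not None:
            # Before the fix this branch returned False, silently disabling
            # prompt caching for every request routed through a gateway.
            return self._gateway.supports_prompt_caching
        spec = find_by_model(model)
        return spec is not None and spec.supports_prompt_caching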


@@ -100,6 +100,7 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
         default_api_base="https://openrouter.ai/api/v1",
         strip_model_prefix=False,
         model_overrides=(),
+        supports_prompt_caching=True,
     ),
     # AiHubMix: global gateway, OpenAI-compatible interface.
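The one-line addition flags OpenRouter as supporting prompt caching in the provider table, which the spec-based branch of _supports_cache_control picks up via find_by_model. Below, a cut-down illustration; the name field and the prefix-based lookup are assumptions, and the real ProviderSpec carries more fields than this hunk shows.

from dataclasses import dataclass


@dataclass(frozen=True)
class ProviderSpec:
    # Cut-down spec: only the fields visible in the hunk, plus a lookup name.
    name: str
    default_api_base: str
    strip_model_prefix: bool = True
    model_overrides: tuple = ()
    supports_prompt_caching: bool = False  # new flag, off unless a provider opts in


PROVIDERS: tuple[ProviderSpec, ...] = (
    ProviderSpec(
        name="openrouter",
        default_api_base="https://openrouter.ai/api/v1",
        strip_model_prefix=False,
        model_overrides=(),
        supports_prompt_caching=True,  # the change in this hunk
    ),
)


def find_by_model(model: str) -> ProviderSpec | None:
    # Toy lookup keyed on a "provider/model" prefix convention (an assumption;
    # the repository's find_by_model may match differently).
    prefix = model.split("/", 1)[0]
    return next((p for p in PROVIDERS if p.name == prefix), None)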