resolve conflicts with main
@@ -42,7 +42,9 @@ class LiteLLMProvider(LLMProvider):
             os.environ["OPENROUTER_API_KEY"] = api_key
         elif self.is_vllm:
             # vLLM/custom endpoint - uses OpenAI-compatible API
             os.environ["OPENAI_API_KEY"] = api_key
+            os.environ["HOSTED_VLLM_API_KEY"] = api_key
         elif "deepseek" in default_model:
             os.environ.setdefault("DEEPSEEK_API_KEY", api_key)
         elif "anthropic" in default_model:
             os.environ.setdefault("ANTHROPIC_API_KEY", api_key)
         elif "openai" in default_model or "gpt" in default_model:
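This hunk routes the single user-supplied api_key to whichever environment variable LiteLLM reads for the detected provider; a vLLM endpoint gets both OPENAI_API_KEY and HOSTED_VLLM_API_KEY, since the hosted_vllm/ route used further down reads the latter. A minimal standalone sketch of that routing, with the function name, trimmed branch list, and example values invented for illustration:

    import os

    def route_api_key(default_model: str, api_key: str, is_vllm: bool = False) -> None:
        # Pick the environment variable LiteLLM consults for this provider
        # and seed it with the single key the user supplied (branches trimmed).
        if is_vllm:
            # vLLM serves an OpenAI-compatible API; set both variables so
            # either routing path finds a key.
            os.environ["OPENAI_API_KEY"] = api_key
            os.environ["HOSTED_VLLM_API_KEY"] = api_key
        elif "deepseek" in default_model:
            os.environ.setdefault("DEEPSEEK_API_KEY", api_key)
        elif "anthropic" in default_model:
            os.environ.setdefault("ANTHROPIC_API_KEY", api_key)

    os.environ.pop("DEEPSEEK_API_KEY", None)
    route_api_key("deepseek-chat", api_key="sk-demo")
    assert os.environ["DEEPSEEK_API_KEY"] == "sk-demo"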
@@ -55,6 +57,9 @@ class LiteLLMProvider(LLMProvider):
             os.environ.setdefault("DASHSCOPE_API_KEY", api_key)
         elif "groq" in default_model:
             os.environ.setdefault("GROQ_API_KEY", api_key)
+        elif "moonshot" in default_model or "kimi" in default_model:
+            os.environ.setdefault("MOONSHOT_API_KEY", api_key)
+            os.environ.setdefault("MOONSHOT_API_BASE", api_base or "https://api.moonshot.cn/v1")
 
         if api_base:
             litellm.api_base = api_base
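Note that the Moonshot branch uses os.environ.setdefault, so a key or base URL the user has already exported is never overwritten, and the API base falls back to https://api.moonshot.cn/v1 only when no api_base was given. A small demo of that precedence (values are made up):

    import os

    # setdefault writes only when the variable is unset, so a value exported
    # in the shell is never clobbered by the one passed to the provider.
    os.environ["MOONSHOT_API_KEY"] = "sk-from-shell"
    os.environ.setdefault("MOONSHOT_API_KEY", "sk-from-config")
    assert os.environ["MOONSHOT_API_KEY"] == "sk-from-shell"

    # With no explicit api_base, the mainland endpoint is the fallback.
    api_base = None
    os.environ.setdefault("MOONSHOT_API_BASE", api_base or "https://api.moonshot.cn/v1")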
@@ -90,30 +95,41 @@ class LiteLLMProvider(LLMProvider):
             model = f"openrouter/{model}"
 
         # For Zhipu/Z.ai, ensure prefix is present
-        # Handle cases like "glm-4.7-flash" -> "zhipu/glm-4.7-flash"
+        # Handle cases like "glm-4.7-flash" -> "zai/glm-4.7-flash"
         if ("glm" in model.lower() or "zhipu" in model.lower()) and not (
             model.startswith("zhipu/") or
+            model.startswith("zai/") or
             model.startswith("openrouter/")
         ):
-            model = f"zhipu/{model}"
+            model = f"zai/{model}"
 
-        # For DashScope/Qwen, ensure prefix is present
+        # For DashScope/Qwen, ensure dashscope/ prefix
         if ("qwen" in model.lower() or "dashscope" in model.lower()) and not (
             model.startswith("dashscope/") or
             model.startswith("openrouter/")
         ):
             model = f"dashscope/{model}"
 
+        # For Moonshot/Kimi, ensure moonshot/ prefix (before vLLM check)
+        if ("moonshot" in model.lower() or "kimi" in model.lower()) and not (
+            model.startswith("moonshot/") or model.startswith("openrouter/")
+        ):
+            model = f"moonshot/{model}"
+
-        # For Gemini, ensure gemini/ prefix if not already present
-        if "gemini" in model.lower() and not model.startswith("gemini/"):
-            model = f"gemini/{model}"
-
 
         # For vLLM, use hosted_vllm/ prefix per LiteLLM docs
         # Convert openai/ prefix to hosted_vllm/ if user specified it
         if self.is_vllm:
             model = f"hosted_vllm/{model}"
 
+        # For Gemini, ensure gemini/ prefix if not already present
+        if "gemini" in model.lower() and not model.startswith("gemini/"):
+            model = f"gemini/{model}"
+
+        # kimi-k2.5 only supports temperature=1.0
+        if "kimi-k2.5" in model.lower():
+            temperature = 1.0
+
         kwargs: dict[str, Any] = {
             "model": model,
             "messages": messages,
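The prefix fix-ups above all follow one pattern: when the model name hints at a provider and no routing prefix is present yet, prepend one so LiteLLM dispatches to the right backend. The rename from zhipu/ to zai/ tracks Zhipu's rebrand to Z.ai while still accepting the old prefix, the Moonshot/Kimi rule is ordered before the vLLM check per its comment, and kimi-k2.5 additionally pins temperature to 1.0. A table-driven sketch of the same normalization, as a hypothetical helper rather than the project's API (the vLLM and Gemini ordering follows the new side of the diff):

    # (name hints, prefixes that already route correctly, prefix to prepend);
    # rules mirror the new side of the diff.
    PREFIX_RULES: list[tuple[tuple[str, ...], tuple[str, ...], str]] = [
        (("glm", "zhipu"), ("zhipu/", "zai/", "openrouter/"), "zai/"),
        (("qwen", "dashscope"), ("dashscope/", "openrouter/"), "dashscope/"),
        (("moonshot", "kimi"), ("moonshot/", "openrouter/"), "moonshot/"),
    ]

    def normalize_model(model: str, is_vllm: bool = False,
                        temperature: float = 0.7) -> tuple[str, float]:
        lowered = model.lower()
        for hints, routed, prefix in PREFIX_RULES:
            if any(h in lowered for h in hints) and not model.startswith(routed):
                model = f"{prefix}{model}"
                break
        if is_vllm:
            # LiteLLM routes OpenAI-compatible vLLM servers via hosted_vllm/.
            model = f"hosted_vllm/{model}"
        if "gemini" in model.lower() and not model.startswith("gemini/"):
            model = f"gemini/{model}"
        if "kimi-k2.5" in model.lower():
            temperature = 1.0  # the model accepts only the default temperature
        return model, temperature

    assert normalize_model("glm-4.7-flash") == ("zai/glm-4.7-flash", 0.7)
    assert normalize_model("qwen-max")[0] == "dashscope/qwen-max"
    assert normalize_model("kimi-k2.5") == ("moonshot/kimi-k2.5", 1.0)

The normalized name and temperature then feed the kwargs dict at the end of the hunk, which the surrounding method presumably passes on to litellm.completion(**kwargs).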