feat: Add VolcEngine LLM provider support

- Add VolcEngine ProviderSpec entry in registry.py
- Add volcengine to ProvidersConfig class in schema.py
- Update model providers table in README.md
- Add a note about the VolcEngine coding plan endpoint (usage sketch below)
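
Since the new entry is a gateway spec (litellm_prefix="openai", env_key="OPENAI_API_KEY"), a request built from it should reduce to LiteLLM's generic OpenAI-compatible call against the Ark endpoint. A minimal sketch of that call, assuming LiteLLM executes the request and using a placeholder model ID (substitute a real Ark model or endpoint ID):

import os

import litellm

# Placeholder model ID; replace with an actual Ark model/endpoint ID.
# The "openai/" prefix, api_base, and key mirror litellm_prefix,
# default_api_base, and env_key from the new ProviderSpec below.
response = litellm.completion(
    model="openai/<ark-model-id>",
    api_base="https://ark.cn-beijing.volces.com/api/v3",
    api_key=os.environ["OPENAI_API_KEY"],
    messages=[{"role": "user", "content": "ping"}],
)
print(response.choices[0].message.content)
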
@@ -137,6 +137,24 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
        model_overrides=(),
    ),
    # VolcEngine (火山引擎): OpenAI-compatible gateway
    ProviderSpec(
        name="volcengine",
        keywords=("volcengine", "volces", "ark"),
        env_key="OPENAI_API_KEY",
        display_name="VolcEngine",
        litellm_prefix="openai",
        skip_prefixes=(),
        env_extras=(),
        is_gateway=True,
        is_local=False,
        detect_by_key_prefix="",
        detect_by_base_keyword="volces",
        default_api_base="https://ark.cn-beijing.volces.com/api/v3",
        strip_model_prefix=False,
        model_overrides=(),
    ),
    # === Standard providers (matched by model-name keywords) ===============
    # Anthropic: LiteLLM recognizes "claude-*" natively, no prefix needed.