Skip to content

Commit dd1e508

Browse files
authored
Merge branch 'main' into main
2 parents b3bb2e3 + c521589 commit dd1e508

8 files changed

Lines changed: 105 additions & 1 deletion

File tree

.env.example

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,10 @@ OPENROUTER_API_KEY=""
1010
DEEPSEEK_API_KEY=""
1111

1212

13+
# Kimi Config (Moonshot OpenAI-compatible API)
14+
KIMI_API_KEY=""
15+
16+
1317
# LM Studio Config (local provider, no API key required)
1418
LM_STUDIO_BASE_URL="http://localhost:1234/v1"
1519

@@ -24,7 +28,7 @@ OLLAMA_BASE_URL="http://localhost:11434"
2428

2529
# All Claude model requests are mapped to these models, plain model is fallback
2630
# Format: provider_type/model/name
27-
# Valid providers: "nvidia_nim" | "open_router" | "deepseek" | "lmstudio" | "llamacpp" | "ollama"
31+
# Valid providers: "nvidia_nim" | "open_router" | "deepseek" | "lmstudio" | "llamacpp" | "ollama" | "kimi"
2832
MODEL_OPUS=
2933
MODEL_SONNET=
3034
MODEL_HAIKU=
@@ -39,6 +43,7 @@ FCC_SMOKE_MODEL_DEEPSEEK=
3943
FCC_SMOKE_MODEL_LMSTUDIO=
4044
FCC_SMOKE_MODEL_LLAMACPP=
4145
FCC_SMOKE_MODEL_OLLAMA=
46+
FCC_SMOKE_MODEL_KIMI=
4247

4348

4449
# Thinking output
@@ -56,6 +61,7 @@ NVIDIA_NIM_PROXY=""
5661
OPENROUTER_PROXY=""
5762
LMSTUDIO_PROXY=""
5863
LLAMACPP_PROXY=""
64+
KIMI_PROXY=""
5965

6066
PROVIDER_RATE_LIMIT=1
6167
PROVIDER_RATE_WINDOW=3

config/provider_catalog.py

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313

1414
# Default upstream base URLs (also re-exported via :mod:`providers.defaults`)
1515
NVIDIA_NIM_DEFAULT_BASE = "https://integrate.api.nvidia.com/v1"
16+
KIMI_DEFAULT_BASE = "https://api.moonshot.ai/v1"
1617
# DeepSeek Anthropic-compatible Messages API (not OpenAI ``/v1`` chat completions).
1718
DEEPSEEK_ANTHROPIC_DEFAULT_BASE = "https://api.deepseek.com/anthropic"
1819
# Historical export name: DeepSeek upstream is the native Anthropic path above.
@@ -102,6 +103,16 @@ class ProviderDescriptor:
102103
"local",
103104
),
104105
),
106+
"kimi": ProviderDescriptor(
107+
provider_id="kimi",
108+
transport_type="openai_chat",
109+
credential_env="KIMI_API_KEY",
110+
credential_url="https://platform.moonshot.cn/console/api-keys",
111+
credential_attr="kimi_api_key",
112+
default_base_url=KIMI_DEFAULT_BASE,
113+
proxy_attr="kimi_proxy",
114+
capabilities=("chat", "streaming", "tools"),
115+
),
105116
}
106117

107118
# Order matches docs / historical error text; must match PROVIDER_CATALOG keys.

config/settings.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -112,6 +112,9 @@ class Settings(BaseSettings):
112112
# ==================== DeepSeek Config ====================
113113
deepseek_api_key: str = Field(default="", validation_alias="DEEPSEEK_API_KEY")
114114

115+
# ==================== Kimi Config ====================
116+
kimi_api_key: str = Field(default="", validation_alias="KIMI_API_KEY")
117+
115118
# ==================== Messaging Platform Selection ====================
116119
# Valid: "telegram" | "discord" | "none"
117120
messaging_platform: str = Field(
@@ -161,6 +164,7 @@ class Settings(BaseSettings):
161164
open_router_proxy: str = Field(default="", validation_alias="OPENROUTER_PROXY")
162165
lmstudio_proxy: str = Field(default="", validation_alias="LMSTUDIO_PROXY")
163166
llamacpp_proxy: str = Field(default="", validation_alias="LLAMACPP_PROXY")
167+
kimi_proxy: str = Field(default="", validation_alias="KIMI_PROXY")
164168

165169
# ==================== Provider Rate Limiting ====================
166170
provider_rate_limit: int = Field(default=40, validation_alias="PROVIDER_RATE_LIMIT")

providers/defaults.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
from config.provider_catalog import (
44
DEEPSEEK_ANTHROPIC_DEFAULT_BASE,
55
DEEPSEEK_DEFAULT_BASE,
6+
KIMI_DEFAULT_BASE,
67
LLAMACPP_DEFAULT_BASE,
78
LMSTUDIO_DEFAULT_BASE,
89
NVIDIA_NIM_DEFAULT_BASE,
@@ -13,6 +14,7 @@
1314
__all__ = (
1415
"DEEPSEEK_ANTHROPIC_DEFAULT_BASE",
1516
"DEEPSEEK_DEFAULT_BASE",
17+
"KIMI_DEFAULT_BASE",
1618
"LLAMACPP_DEFAULT_BASE",
1719
"LMSTUDIO_DEFAULT_BASE",
1820
"NVIDIA_NIM_DEFAULT_BASE",

providers/kimi/__init__.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
"""Kimi (Moonshot) provider exports."""

from providers.defaults import KIMI_DEFAULT_BASE

from .client import KimiProvider

# Public surface of the package: the default upstream base URL plus the
# provider class itself.
__all__ = ["KIMI_DEFAULT_BASE", "KimiProvider"]

providers/kimi/client.py

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
"""Kimi (Moonshot) provider implementation."""
2+
3+
from __future__ import annotations
4+
5+
from typing import Any
6+
7+
from providers.base import ProviderConfig
8+
from providers.defaults import KIMI_DEFAULT_BASE
9+
from providers.openai_compat import OpenAIChatTransport
10+
11+
from .request import build_request_body
12+
13+
14+
class KimiProvider(OpenAIChatTransport):
    """Kimi (Moonshot) chat provider.

    Thin wrapper over the shared OpenAI-compatible transport: it supplies the
    Moonshot base URL and API key, and delegates Anthropic→OpenAI request
    conversion to :func:`build_request_body`.
    """

    def __init__(self, config: ProviderConfig):
        # Fall back to the public Moonshot endpoint when no override is
        # configured on the provider config.
        effective_base = config.base_url or KIMI_DEFAULT_BASE
        super().__init__(
            config,
            provider_name="KIMI",
            base_url=effective_base,
            api_key=config.api_key,
        )

    def _build_request_body(
        self, request: Any, thinking_enabled: bool | None = None
    ) -> dict:
        # Resolve the effective thinking flag via the transport's policy
        # before handing off to the module-level builder.
        resolved_thinking = self._is_thinking_enabled(request, thinking_enabled)
        return build_request_body(request, thinking_enabled=resolved_thinking)

providers/kimi/request.py

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
"""Request builder for Kimi (Moonshot) provider."""
2+
3+
from typing import Any
4+
5+
from loguru import logger
6+
7+
from core.anthropic import ReasoningReplayMode, build_base_request_body
8+
from core.anthropic.conversion import OpenAIConversionError
9+
from providers.exceptions import InvalidRequestError
10+
11+
12+
def build_request_body(request_data: Any, *, thinking_enabled: bool) -> dict:
    """Build an OpenAI-format request body from an Anthropic-style request.

    Args:
        request_data: Anthropic-style request object (read via ``model`` and
            ``messages`` attributes for logging; passed through to the shared
            converter).
        thinking_enabled: Whether thinking output is enabled for this request.
            NOTE(review): currently accepted but not consulted here — the body
            carries no reasoning toggle for Kimi; confirm this is intended.

    Returns:
        The converted OpenAI chat-completions request body.

    Raises:
        InvalidRequestError: If the shared converter rejects the request.
    """
    model_name = getattr(request_data, "model", "?")
    message_count = len(getattr(request_data, "messages", []))
    logger.debug(
        "KIMI_REQUEST: conversion start model={} msgs={}",
        model_name,
        message_count,
    )

    try:
        converted = build_base_request_body(
            request_data,
            reasoning_replay=ReasoningReplayMode.DISABLED,
        )
    except OpenAIConversionError as err:
        # Surface conversion failures as provider-level invalid-request errors.
        raise InvalidRequestError(str(err)) from err

    logger.debug(
        "KIMI_REQUEST: conversion done model={} msgs={} tools={}",
        converted.get("model"),
        len(converted.get("messages", [])),
        len(converted.get("tools", [])),
    )
    return converted

providers/registry.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,13 +68,20 @@ def _create_ollama(config: ProviderConfig, _settings: Settings) -> BaseProvider:
6868
return OllamaProvider(config)
6969

7070

71+
def _create_kimi(config: ProviderConfig, _settings: Settings) -> BaseProvider:
    """Factory for the Kimi provider; import is deferred to keep registry
    import-time light and match the sibling factory functions."""
    from providers.kimi import KimiProvider

    return KimiProvider(config)
75+
76+
7177
PROVIDER_FACTORIES: dict[str, ProviderFactory] = {
7278
"nvidia_nim": _create_nvidia_nim,
7379
"open_router": _create_open_router,
7480
"deepseek": _create_deepseek,
7581
"lmstudio": _create_lmstudio,
7682
"llamacpp": _create_llamacpp,
7783
"ollama": _create_ollama,
84+
"kimi": _create_kimi,
7885
}
7986

8087
if set(PROVIDER_DESCRIPTORS) != set(SUPPORTED_PROVIDER_IDS) or set(

0 commit comments

Comments
 (0)