
Commit 2e52dec

Anthropic system prompts are collected but never sent to the model
1 parent 456acf7 commit 2e52dec

1 file changed: src/repl/serve_anthropic.go (12 additions, 6 deletions)
@@ -147,6 +147,12 @@ func (sm *ServerManager) handleAnthropicMessages(w http.ResponseWriter, r *http.
 		}
 	}
 
+	// Prepend the system prompt as a system-role message so it actually
+	// reaches the underlying provider via SendMessage.
+	if systemPrompt.Len() > 0 {
+		messages = append([]llm.Message{{Role: "system", Content: systemPrompt.String()}}, messages...)
+	}
+
 	// Convert Anthropic tools to OpenAI format
 	var tools []llm.OpenAITool
 	if len(req.Tools) > 0 {
@@ -168,14 +174,14 @@ func (sm *ServerManager) handleAnthropicMessages(w http.ResponseWriter, r *http.
 	}
 
 	if req.Stream {
-		sm.handleAnthropicStreamingMessages(w, r, messages, systemPrompt.String(), req, tools)
+		sm.handleAnthropicStreamingMessages(w, r, messages, req, tools)
 	} else {
-		sm.handleAnthropicNonStreamingMessages(w, r, messages, systemPrompt.String(), req, tools)
+		sm.handleAnthropicNonStreamingMessages(w, r, messages, req, tools)
 	}
 }
 
 // handleAnthropicStreamingMessages handles streaming Anthropic messages
-func (sm *ServerManager) handleAnthropicStreamingMessages(w http.ResponseWriter, r *http.Request, messages []llm.Message, systemPrompt string, req AnthropicMessagesRequest, tools []llm.OpenAITool) {
+func (sm *ServerManager) handleAnthropicStreamingMessages(w http.ResponseWriter, r *http.Request, messages []llm.Message, req AnthropicMessagesRequest, tools []llm.OpenAITool) {
 	w.Header().Set("Content-Type", "text/event-stream")
 	w.Header().Set("Cache-Control", "no-cache")
 	w.Header().Set("Connection", "keep-alive")
@@ -213,7 +219,7 @@ func (sm *ServerManager) handleAnthropicStreamingMessages(w http.ResponseWriter,
 		Model:      req.Model,
 		StopReason: stringPtr("end_turn"),
 		Usage: &AnthropicUsage{
-			InputTokens:  len(strings.Fields(strings.Join([]string{systemPrompt, fmt.Sprintf("%v", messages[0].Content)}, " "))),
+			InputTokens:  len(strings.Fields(fmt.Sprintf("%v", messages[0].Content))),
 			OutputTokens: len(strings.Fields(response)),
 		},
 	}
@@ -228,7 +234,7 @@ func (sm *ServerManager) handleAnthropicStreamingMessages(w http.ResponseWriter,
 }
 
 // handleAnthropicNonStreamingMessages handles non-streaming Anthropic messages
-func (sm *ServerManager) handleAnthropicNonStreamingMessages(w http.ResponseWriter, r *http.Request, messages []llm.Message, systemPrompt string, req AnthropicMessagesRequest, tools []llm.OpenAITool) {
+func (sm *ServerManager) handleAnthropicNonStreamingMessages(w http.ResponseWriter, r *http.Request, messages []llm.Message, req AnthropicMessagesRequest, tools []llm.OpenAITool) {
 	client, err := sm.getLLMClient()
 	if err != nil {
 		http.Error(w, fmt.Sprintf("LLM init error: %v", err), http.StatusInternalServerError)
@@ -254,7 +260,7 @@ func (sm *ServerManager) handleAnthropicNonStreamingMessages(w http.ResponseWrit
 		Model:      req.Model,
 		StopReason: stringPtr("end_turn"),
 		Usage: &AnthropicUsage{
-			InputTokens:  len(strings.Fields(strings.Join([]string{systemPrompt, fmt.Sprintf("%v", messages[0].Content)}, " "))),
+			InputTokens:  len(strings.Fields(fmt.Sprintf("%v", messages[0].Content))),
 			OutputTokens: len(strings.Fields(response)),
 		},
 	}
