Skip to content

Commit 705514b

Browse files
committed
Add chat.replythink option
1 parent 786acee commit 705514b

7 files changed

Lines changed: 81 additions & 30 deletions

File tree

src/repl/conf.go

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,7 @@ func NewConfigOptions() *ConfigOptions {
7878
// Memory option: load consolidated memory from ~/.config/mai/memory.txt into conversation context
7979
co.RegisterOption("chat.memory", BooleanOption, "Load memory.txt from ~/.config/mai and include in context", "false")
8080
co.RegisterOption("chat.replies", BooleanOption, "Include chat replies when building a single prompt", "true")
81+
co.RegisterOption("chat.replythink", BooleanOption, "Include assistant reasoning in stored chat replies", "false")
8182
co.RegisterOption("chat.save", StringOption, "Session save behavior on exit: always, never, or prompt", "prompt")
8283
co.RegisterOption("chat.system", BooleanOption, "Include chat system messages when building a single prompt", "true")
8384
// Number of most recent messages to include when sending to the LLM (0 = all)
@@ -972,6 +973,8 @@ func (r *REPL) handleUnsetCommand(args []string) (string, error) {
972973
fmt.Fprintf(&output, "Streaming mode %s (reverted to default)\r\n", streamStatus)
973974
case "chat.replies":
974975
output.WriteString("Include replies reverted to default\r\n")
976+
case "chat.replythink":
977+
output.WriteString("Assistant reasoning in replies reverted to default\r\n")
975978
case "llm.think":
976979
output.WriteString("AI reasoning reverted to default\r\n")
977980
case "think.disable":

src/repl/llm/ai.go

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,10 @@ type Config struct {
123123
// which are trimmed at the start of responses).
124124
ThinkHide bool
125125

126+
// PreserveThink keeps tagged think regions in returned responses even when
127+
// ThinkHide is filtering them from terminal output.
128+
PreserveThink bool
129+
126130
// ShowTPS enables displaying time statistics (time to first token,
127131
// generation time, tokens/second, chars/second) after LLM responses.
128132
ShowTPS bool

src/repl/llm/llm.go

Lines changed: 26 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -225,6 +225,31 @@ func TrimLeadingThink(s string) string {
225225
return ""
226226
}
227227

228+
// StripThink removes all <think>...</think> regions from a complete response.
// If a think block is unterminated, the block and the rest of the response are
// dropped so internal reasoning is not leaked into conversation logs.
func StripThink(s string) string {
	if s == "" {
		return s
	}
	var kept strings.Builder
	for {
		// Emit everything up to the next think-region opener; if there is
		// none, the remainder of the response is plain content.
		before, rest, opened := strings.Cut(s, "<think>")
		if !opened {
			kept.WriteString(s)
			return kept.String()
		}
		kept.WriteString(before)
		// An unterminated block swallows the rest of the response so that
		// partial reasoning never reaches the logs.
		_, tail, closed := strings.Cut(rest, "</think>")
		if !closed {
			return kept.String()
		}
		// Drop whitespace left behind where the block was removed.
		s = strings.TrimLeft(tail, " \t\r\n")
	}
}
252+
228253
// StreamDemo centralizes demo/animation notifications for streaming flows.
229254
// Create one per streaming request and call OnToken with each raw token.
230255
// It will:
@@ -643,7 +668,7 @@ func (c *LLMClient) SendMessage(messages []Message, stream bool, images []string
643668
// client requested hiding of think-regions. When hiding is disabled,
644669
// preserve the tags and their content so the UI can show them.
645670
if err == nil && resp != "" {
646-
if c.Config != nil && c.Config.ThinkHide {
671+
if c.Config != nil && c.Config.ThinkHide && !c.Config.PreserveThink {
647672
// Trim any leading think block and strip all think sections
648673
// from the printed output.
649674
resp = TrimLeadingThink(resp)

src/repl/llm/openai.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -368,7 +368,8 @@ func (p *OpenAIProvider) SendMessage(messages []Message, stream bool, images []s
368368
request["reasoning_effort"] = effort
369369
}
370370
case "openrouter":
371-
if reasoning := openRouterReasoning(p.config.ReasoningEffort, p.config.ThinkHide); reasoning != nil {
371+
excludeReasoning := p.config.ThinkHide && !p.config.PreserveThink
372+
if reasoning := openRouterReasoning(p.config.ReasoningEffort, excludeReasoning); reasoning != nil {
372373
request["reasoning"] = reasoning
373374
}
374375
}

src/repl/main.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -534,6 +534,7 @@ func applyConfigOptionsToLLMConfigForTask(config *llm.Config, opts *ConfigOption
534534
} else if opts.IsSet("ui.think") {
535535
config.ThinkHide = !opts.GetBool("ui.think")
536536
}
537+
config.PreserveThink = opts.GetBool("chat.replythink")
537538

538539
// Debug flag: when enabled, show raw messages sent to providers
539540
if opts.Get("repl.debug") != "" {

src/repl/repl.go

Lines changed: 44 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -1104,23 +1104,7 @@ func (r *REPL) sendToAI(input string, redirectType string, redirectTarget string
11041104

11051105
// Handle conversation history based on logging and reply settings
11061106
if r.configOptions.GetBool("chat.log") {
1107-
// When logging is enabled, use normal message history behavior
1108-
if r.configOptions.GetBool("chat.replies") {
1109-
// Include all messages
1110-
messages = append(messages, r.messages...)
1111-
} else {
1112-
// Include only user messages
1113-
for _, msg := range r.messages {
1114-
if msg.Role == "user" {
1115-
messages = append(messages, msg)
1116-
} else {
1117-
msg2 := msg
1118-
msg2.Content = ""
1119-
// include empty response from the llm
1120-
messages = append(messages, msg2)
1121-
}
1122-
}
1123-
}
1107+
messages = append(messages, r.messagesForPrompt()...)
11241108
}
11251109

11261110
if r.configOptions.GetBool("mcp.use") {
@@ -1338,7 +1322,7 @@ func (r *REPL) sendToAI(input string, redirectType string, redirectTarget string
13381322
}
13391323

13401324
// Create assistant message
1341-
assistantMessage := llm.Message{Role: "assistant", Content: response}
1325+
assistantMessage := r.assistantMessageForLog(response)
13421326

13431327
if r.configOptions.GetBool("chat.log") {
13441328
// Save to conversation history when logging is enabled
@@ -1428,13 +1412,45 @@ func (r *REPL) regularResponse(input string) error {
14281412
func (r *REPL) getLastAssistantReply() (string, error) {
14291413
// Iterate backwards through messages to find the last assistant message
14301414
for i := len(r.messages) - 1; i >= 0; i-- {
1431-
if r.messages[i].Role == "assistant" {
1432-
return r.messages[i].Content, nil
1415+
if strings.EqualFold(r.messages[i].Role, "assistant") {
1416+
return r.messageForLog(r.messages[i]).Content, nil
14331417
}
14341418
}
14351419
return "", fmt.Errorf("no assistant replies found in conversation history")
14361420
}
14371421

1422+
// assistantMessageForLog wraps a raw LLM response in an assistant message and
// routes it through messageForLog so the chat.replythink filtering applies
// before the message is stored in conversation history.
func (r *REPL) assistantMessageForLog(response string) llm.Message {
1423+
return r.messageForLog(llm.Message{Role: "assistant", Content: response})
1424+
}
1425+
1426+
func (r *REPL) messageForLog(msg llm.Message) llm.Message {
1427+
if strings.EqualFold(msg.Role, "assistant") && !r.configOptions.GetBool("chat.replythink") {
1428+
msg.Content = strings.TrimLeft(llm.StripThink(msg.Content), " \t\r\n")
1429+
}
1430+
return msg
1431+
}
1432+
1433+
func (r *REPL) messagesForLog() []llm.Message {
1434+
messages := make([]llm.Message, 0, len(r.messages))
1435+
for _, msg := range r.messages {
1436+
messages = append(messages, r.messageForLog(msg))
1437+
}
1438+
return messages
1439+
}
1440+
1441+
func (r *REPL) messagesForPrompt() []llm.Message {
1442+
messages := make([]llm.Message, 0, len(r.messages))
1443+
includeReplies := r.configOptions.GetBool("chat.replies")
1444+
for _, msg := range r.messages {
1445+
msg = r.messageForLog(msg)
1446+
if strings.EqualFold(msg.Role, "assistant") && !includeReplies {
1447+
msg.Content = ""
1448+
}
1449+
messages = append(messages, msg)
1450+
}
1451+
return messages
1452+
}
1453+
14381454
// handleShellInput processes input starting with '$' as hybrid AI/shell mode
14391455
func (r *REPL) handleShellInput(input string) error {
14401456
// Handle redirection first, before substitutions
@@ -1758,7 +1774,7 @@ func (r *REPL) executeLLMQueryWithoutStreaming(query string) (string, error) {
17581774

17591775
// Add conversation history if we should include replies
17601776
if r.configOptions.GetBool("chat.replies") && len(r.messages) > 0 {
1761-
messages = append(messages, r.messages...)
1777+
messages = append(messages, r.messagesForPrompt()...)
17621778
}
17631779

17641780
// Add the user query
@@ -1830,7 +1846,7 @@ func (r *REPL) displayConversationLog() string {
18301846
fmt.Fprintf(&output, "[%d] %s: ", i+1, role)
18311847

18321848
// For log display, use a larger truncation limit
1833-
content := msg.Content
1849+
content := r.messageForLog(msg).Content
18341850
if len(content) > 100 {
18351851
content = content[:97] + "..."
18361852
}
@@ -1842,8 +1858,9 @@ func (r *REPL) displayConversationLog() string {
18421858
}
18431859

18441860
fmt.Fprintf(&output, "Total messages: %d\r\n", len(r.messages))
1845-
fmt.Fprintf(&output, "Settings: replies=%t, streaming=%t, reasoning=%s, logging=%t\r\n",
1861+
fmt.Fprintf(&output, "Settings: replies=%t, replythink=%t, streaming=%t, reasoning=%s, logging=%t\r\n",
18461862
r.configOptions.GetBool("chat.replies"),
1863+
r.configOptions.GetBool("chat.replythink"),
18471864
r.configOptions.GetBool("llm.stream"),
18481865
r.configOptions.Get("think.reason"),
18491866
r.configOptions.GetBool("chat.log"))
@@ -1879,7 +1896,7 @@ func (r *REPL) displayFullConversationLog() string {
18791896

18801897
output.WriteString("# Full conversation log:\r\n")
18811898

1882-
for i, msg := range r.messages {
1899+
for i, msg := range r.messagesForLog() {
18831900
role := formatRole(msg.Role)
18841901

18851902
fmt.Fprintf(&output, "\r\n## [%d] %s:\r\n", i+1, role)
@@ -2305,7 +2322,7 @@ func (r *REPL) handleCompactCommand(extra ...string) error {
23052322
var conversationText strings.Builder
23062323
conversationText.WriteString("# Conversation History\n\n")
23072324

2308-
for i, msg := range r.messages {
2325+
for i, msg := range r.messagesForLog() {
23092326
role := formatRole(msg.Role)
23102327
fmt.Fprintf(&conversationText, "## %s %d:\n\n%s\n\n", role, i+1, msg.Content)
23112328
}
@@ -2348,7 +2365,7 @@ func (r *REPL) handleCompactCommand(extra ...string) error {
23482365
}
23492366

23502367
// Create the assistant response message
2351-
assistantMessage := llm.Message{Role: "assistant", Content: response}
2368+
assistantMessage := r.assistantMessageForLog(response)
23522369

23532370
// Replace the conversation with just the compact message and response
23542371
r.messages = []llm.Message{
@@ -2439,7 +2456,7 @@ func (r *REPL) saveConversation(path string) error {
24392456
Messages []llm.Message `json:"messages"`
24402457
}{
24412458
SystemPrompt: r.currentSystemPrompt(),
2442-
Messages: r.messages,
2459+
Messages: r.messagesForLog(),
24432460
}
24442461

24452462
// Convert to JSON

src/repl/sessions.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -261,7 +261,7 @@ func (r *REPL) saveSession(sessionName string) error {
261261
topicFile := filepath.Join(maiDir, "chats", sessionName+".topic")
262262

263263
sess := sessionData{
264-
Messages: r.messages,
264+
Messages: r.messagesForLog(),
265265
Provider: r.configOptions.Get("ai.provider"),
266266
Model: r.configOptions.Get("ai.model"),
267267
BaseURL: r.configOptions.Get("ai.baseurl"),

0 commit comments

Comments
 (0)