Skip to content
This repository was archived by the owner on Mar 9, 2026. It is now read-only.

Commit eebc315

Browse files
committed
Bug fix: Create a buffer so we aren't close to the 4k tokenization limit. I've seen cases where the tiktoken token count doesn't agree with OpenAI's count.
1 parent a1de539 commit eebc315

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

aicodebot/cli.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -108,7 +108,7 @@ def commit(verbose, response_token_size, yes, skip_pre_commit):
108108
# Bigger models coming soon
109109
console.print("The diff context is too large to review. 😞")
110110
sys.exit(1)
111-
elif prompt_token_size + response_token_size > 4_000:
111+
elif prompt_token_size + response_token_size > 3_500: # It's actually 4k, but we want a buffer
112112
model = "gpt-3.5-turbo-16k" # supports 16k tokens but is a bit slower and more expensive
113113
else:
114114
model = DEFAULT_MODEL # gpt-3.5-turbo supports 4k tokens
@@ -231,7 +231,7 @@ def review(commit, verbose):
231231
# Bigger models coming soon
232232
console.print("The diff context is too large to review. 😞")
233233
sys.exit(1)
234-
elif prompt_token_size + response_token_size > 4_000:
234+
elif prompt_token_size + response_token_size > 3_500: # It's actually 4k, but we want a buffer
235235
model = "gpt-3.5-turbo-16k" # supports 16k tokens but is a bit slower and more expensive
236236
else:
237237
model = DEFAULT_MODEL # gpt-3.5-turbo supports 4k tokens

0 commit comments

Comments (0)