Skip to content
This repository was archived by the owner on Mar 9, 2026. It is now read-only.

Commit ab49577

Browse files
committed
Add a function for centrally handling the engine list, with caching
This commit introduces a few key changes to the `Coder` class in `coder.py`: 1. Added `functools` to the import list. 2. Added a new method `get_openai_supported_engines` that fetches the list of models supported by the OpenAI API key. This method uses `functools.lru_cache` for caching the result, improving efficiency. 3. Refactored `get_llm_model_name` to use the new `get_openai_supported_engines` method, reducing code duplication and improving readability. These changes should make the `Coder` class more efficient and easier to understand. 🧠💡
1 parent fe7ddda commit ab49577

File tree

1 file changed

+15
-7
lines changed

1 file changed

+15
-7
lines changed

aicodebot/coder.py

Lines changed: 15 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
from langchain.chat_models import ChatOpenAI
44
from openai.api_resources import engine
55
from pathlib import Path
6-
import fnmatch, openai, tiktoken
6+
import fnmatch, functools, openai, tiktoken
77

88
DEFAULT_MAX_TOKENS = 512
99
PRECISE_TEMPERATURE = 0.05
@@ -14,7 +14,6 @@ class Coder:
1414
"""
1515
The Coder class encapsulates the functionality of interacting with LLMs,
1616
git, and the local file system.
17-
1817
"""
1918

2019
@classmethod
@@ -42,6 +41,17 @@ def generate_directory_structure(cls, path, ignore_patterns=None, use_gitignore=
4241

4342
return structure
4443

44+
@staticmethod
@functools.lru_cache
def get_openai_supported_engines():
    """Fetch the list of engine (model) ids available to the configured OpenAI API key.

    The call is memoized with ``functools.lru_cache``; since the function takes no
    arguments, the API request happens at most once per process.

    NOTE(review): because the result is cached for the life of the process, a
    change to ``openai_api_key`` in the config after the first call will NOT
    refresh the list — confirm this is acceptable for long-running sessions.

    Returns:
        list: engine id strings reported by the OpenAI API (e.g. ``"gpt-4"``).
    """
    config = read_config()
    openai.api_key = config["openai_api_key"]
    response = engine.Engine.list()
    # Use a distinct loop variable so the comprehension does not shadow the
    # imported `engine` module referenced on the line above.
    supported = [item.id for item in response.data]
    logger.trace(f"OpenAI supported engines: {supported}")
    return supported
54+
4555
@staticmethod
4656
def get_llm(
4757
model_name,
@@ -73,18 +83,16 @@ def get_llm_model_name(token_size=0):
7383
"gpt-3.5-turbo-16k": 16384,
7484
}
7585

76-
config = read_config()
77-
openai.api_key = config["openai_api_key"]
78-
engines = engine.Engine.list()
86+
engines = Coder.get_openai_supported_engines()
7987

8088
# For some unknown reason, tiktoken often underestimates the token size by ~10%, so let's buffer
8189
token_size = int(token_size * 1.1)
8290

8391
# Try to use GPT-4 if it is supported and the token size is small enough
84-
if "gpt-4" in [engine.id for engine in engines.data] and token_size <= model_options["gpt-4"]:
92+
if "gpt-4" in engines and token_size <= model_options["gpt-4"]:
8593
logger.info(f"Using GPT-4 for token size {token_size}")
8694
return "gpt-4"
87-
elif "gpt-4-32k" in [engine.id for engine in engines.data] and token_size <= model_options["gpt-4-32k"]:
95+
elif "gpt-4-32k" in engines and token_size <= model_options["gpt-4-32k"]:
8896
logger.info(f"Using GPT-4-32k for token size {token_size}")
8997
return "gpt-4-32k"
9098
elif token_size <= model_options["gpt-3.5-turbo"]:

0 commit comments

Comments
 (0)