Skip to content
This repository was archived by the owner on Mar 9, 2026. It is now read-only.

Commit fce0488

Browse files
committed
Refactor configuration and dependency management. Resolves #19
Configuration management has been refactored from environment variables to a YAML-based configuration file, for greater flexibility and ease of use. The python-dotenv dependency has been replaced by PyYAML to handle the new configuration format. The code has been adjusted accordingly: the OPENAI_API_KEY — required to access the OpenAI API — is now read from the new configuration file.
1 parent 1b346b8 commit fce0488

File tree

4 files changed

+109
-65
lines changed

4 files changed

+109
-65
lines changed

aicodebot/cli.py

Lines changed: 89 additions & 59 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,13 @@
11
from aicodebot import version as aicodebot_version
2-
from aicodebot.helpers import exec_and_get_output, get_llm_model, get_token_length, git_diff_context, logger
2+
from aicodebot.helpers import (
3+
exec_and_get_output,
4+
get_llm_model,
5+
get_token_length,
6+
git_diff_context,
7+
logger,
8+
read_config,
9+
)
310
from aicodebot.prompts import generate_files_context, get_prompt
4-
from dotenv import load_dotenv
511
from langchain.callbacks.base import BaseCallbackHandler
612
from langchain.chains import LLMChain
713
from langchain.chat_models import ChatOpenAI
@@ -12,14 +18,15 @@
1218
from rich.live import Live
1319
from rich.markdown import Markdown
1420
from rich.style import Style
15-
import click, datetime, openai, os, random, subprocess, sys, tempfile, webbrowser
21+
import click, datetime, openai, os, random, subprocess, sys, tempfile, webbrowser, yaml
1622

1723
# ----------------------------- Default settings ----------------------------- #
1824

1925
DEFAULT_MAX_TOKENS = 512
2026
PRECISE_TEMPERATURE = 0
2127
CREATIVE_TEMPERATURE = 0.7
2228
DEFAULT_SPINNER = "point"
29+
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
2330

2431
# ----------------------- Setup for rich console output ---------------------- #
2532

@@ -53,7 +60,7 @@ def cli():
5360
@click.option("-t", "--response-token-size", type=int, default=350)
5461
def alignment(response_token_size, verbose):
5562
"""Get a message about Heart-Centered AI Alignment ❤ + 🤖."""
56-
setup_environment()
63+
config = setup_config()
5764

5865
# Load the prompt
5966
prompt = get_prompt("alignment")
@@ -66,6 +73,7 @@ def alignment(response_token_size, verbose):
6673
llm = ChatOpenAI(
6774
model=model,
6875
temperature=CREATIVE_TEMPERATURE,
76+
openai_api_key=config["OPENAI_API_KEY"],
6977
max_tokens=response_token_size,
7078
verbose=verbose,
7179
streaming=True,
@@ -85,7 +93,7 @@ def alignment(response_token_size, verbose):
8593
@click.option("--skip-pre-commit", is_flag=True, help="Skip running pre-commit (otherwise run it if it is found).")
8694
def commit(verbose, response_token_size, yes, skip_pre_commit):
8795
"""Generate a commit message based on your changes."""
88-
setup_environment()
96+
config = setup_config()
8997

9098
# Check if pre-commit is installed and .pre-commit-config.yaml exists
9199
if not skip_pre_commit and Path(".pre-commit-config.yaml").exists():
@@ -126,7 +134,13 @@ def commit(verbose, response_token_size, yes, skip_pre_commit):
126134
)
127135

128136
# Set up the language model
129-
llm = ChatOpenAI(model=model, temperature=PRECISE_TEMPERATURE, max_tokens=DEFAULT_MAX_TOKENS, verbose=verbose)
137+
llm = ChatOpenAI(
138+
model=model,
139+
openai_api_key=config["OPENAI_API_KEY"],
140+
temperature=PRECISE_TEMPERATURE,
141+
max_tokens=DEFAULT_MAX_TOKENS,
142+
verbose=verbose,
143+
)
130144

131145
# Set up the chain
132146
chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
@@ -159,7 +173,7 @@ def commit(verbose, response_token_size, yes, skip_pre_commit):
159173
@click.option("-v", "--verbose", count=True)
160174
def debug(command, verbose):
161175
"""Run a command and debug the output."""
162-
setup_environment()
176+
config = setup_config()
163177

164178
# Run the command and capture its output
165179
command_str = " ".join(command)
@@ -194,6 +208,7 @@ def debug(command, verbose):
194208
llm = ChatOpenAI(
195209
model=model,
196210
temperature=PRECISE_TEMPERATURE,
211+
openai_api_key=config["OPENAI_API_KEY"],
197212
max_tokens=DEFAULT_MAX_TOKENS,
198213
verbose=verbose,
199214
streaming=True,
@@ -211,7 +226,7 @@ def debug(command, verbose):
211226
@click.option("-v", "--verbose", count=True)
212227
def fun_fact(verbose):
213228
"""Get a fun fact about programming and artificial intelligence."""
214-
setup_environment()
229+
config = setup_config()
215230

216231
# Load the prompt
217232
prompt = get_prompt("fun_fact")
@@ -225,6 +240,7 @@ def fun_fact(verbose):
225240
model=model,
226241
temperature=PRECISE_TEMPERATURE,
227242
max_tokens=DEFAULT_MAX_TOKENS / 2,
243+
openai_api_key=config["OPENAI_API_KEY"],
228244
verbose=verbose,
229245
streaming=True,
230246
callbacks=[RichLiveCallbackHandler(live)],
@@ -242,7 +258,7 @@ def fun_fact(verbose):
242258
@click.option("-v", "--verbose", count=True)
243259
def review(commit, verbose):
244260
"""Do a code review, with [un]staged changes, or a specified commit."""
245-
setup_environment()
261+
config = setup_config()
246262

247263
diff_context = git_diff_context(commit)
248264
if not diff_context:
@@ -264,6 +280,7 @@ def review(commit, verbose):
264280
llm = ChatOpenAI(
265281
model=model,
266282
temperature=PRECISE_TEMPERATURE,
283+
openai_api_key=config["OPENAI_API_KEY"],
267284
max_tokens=response_token_size,
268285
verbose=verbose,
269286
streaming=True,
@@ -276,6 +293,27 @@ def review(commit, verbose):
276293
chain.run(diff_context)
277294

278295

296+
@cli.command()
@click.option("--openai-api-key", "-k", help="Your OpenAI API key")
def setup(openai_api_key):
    """Set up the configuration file with your OpenAI API key
    If the config file already exists, it will ask you if you want to remove it and recreate it.
    """
    config_file = Path(read_config.CONFIG_FILE)

    if config_file.exists():
        # An existing config is never overwritten without explicit confirmation.
        overwrite_prompt = (
            f"The config file already exists at {config_file}. Do you want to remove it and recreate it?"
        )
        if not click.confirm(overwrite_prompt):
            console.print("Setup cancelled. 🚫")
            return
        # User agreed: drop the old file so setup_config starts from scratch.
        config_file.unlink()

    # Delegate creation (prompting, validation, writing) to setup_config.
    setup_config(openai_api_key)
279317
@cli.command
280318
@click.option("--request", "-r", help="What to ask your sidekick to do")
281319
@click.option("-v", "--verbose", count=True)
@@ -288,7 +326,7 @@ def sidekick(request, verbose, files):
288326

289327
console.print("This is an experimental feature. Play with it, but don't count on it.", style=warning_style)
290328

291-
setup_environment()
329+
config = setup_config()
292330

293331
# Pull in context. Right now it's just the contents of files that we passed in.
294332
# Soon, we could add vector embeddings of:
@@ -308,6 +346,7 @@ def sidekick(request, verbose, files):
308346

309347
llm = ChatOpenAI(
310348
model=model,
349+
openai_api_key=config["OPENAI_API_KEY"],
311350
temperature=PRECISE_TEMPERATURE,
312351
max_tokens=DEFAULT_MAX_TOKENS * 2,
313352
verbose=verbose,
@@ -353,64 +392,55 @@ def sidekick(request, verbose, files):
353392
# ---------------------------------------------------------------------------- #
354393

355394

def setup_config(openai_api_key=OPENAI_API_KEY):
    """Load the config file, creating it interactively if it does not exist.

    If the YAML config file already exists, set ``openai.api_key`` from it and
    return the parsed config dict.

    Otherwise, obtain an API key (from the ``openai_api_key`` argument, the
    OPENAI_API_KEY environment default, or an interactive prompt), validate it
    against the OpenAI API (recording whether GPT-4 is available), write the
    new config file, and exit the process so the user can re-run with the new
    config in place.

    NOTE(review): because the create path ends in ``sys.exit(0)``, this
    function only returns on the already-configured path. The original code
    had an unreachable ``return config_data`` after the exit; it has been
    removed.
    """
    config = read_config()
    if config:
        openai.api_key = config["OPENAI_API_KEY"]
        return config
    elif openai_api_key:
        # Key supplied by caller (or environment); still need to create the file.
        openai.api_key = openai_api_key

    console.print(
        f"[bold red]The config file does not exist.[/bold red]\n"
        f"Let's set that up for you at {read_config.CONFIG_FILE}\n"
    )

    if not openai.api_key:
        openai_api_key_url = "https://platform.openai.com/account/api-keys"
        console.print(
            "First, an OpenAI API key is required to use AICodeBot. You can get one for free on the OpenAI website.\n"
        )

        if click.confirm("Open the OpenAI API keys page for you in a browser?"):
            webbrowser.open(openai_api_key_url)

        openai.api_key = click.prompt("Please enter your OpenAI API key")

    # Validate the API key and check if it supports GPT-4
    try:
        click.echo("Validating the API key, and checking if GPT-4 is supported...")
        engines = engine.Engine.list()
        logger.trace(f"Engines: {engines}")
        gpt_4_supported = "gpt-4" in [engine.id for engine in engines.data]
        if gpt_4_supported:
            click.echo("✅ The API key is valid and supports GPT-4.")
        else:
            click.echo("✅ The API key is valid, but does not support GPT-4. GPT-3.5 will be used instead.")
    except Exception as e:
        raise click.ClickException(f"Failed to validate the API key: {str(e)}") from e

    config_data = {"config_version": 1, "OPENAI_API_KEY": openai.api_key, "gpt_4_supported": gpt_4_supported}

    with Path.open(read_config.CONFIG_FILE, "w") as f:
        yaml.dump(config_data, f)

    console.print(
        f"[bold green]Created {read_config.CONFIG_FILE} with your OpenAI API key.[/bold green] "
        "Now, please re-run aicodebot and let's get started!"
    )
    # Deliberate exit: the freshly written config should be picked up by a
    # clean invocation, as the message above tells the user.
    sys.exit(0)
414444

415445

416446
class RichLiveCallbackHandler(BaseCallbackHandler):

aicodebot/helpers.py

Lines changed: 16 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
from loguru import logger
22
from pathlib import Path
3-
import os, subprocess, sys, tiktoken
3+
import os, subprocess, sys, tiktoken, yaml
44

55
# ---------------------------------------------------------------------------- #
66
# Global logging configuration for loguru #
@@ -30,7 +30,8 @@ def get_llm_model(token_size=0):
3030
"gpt-3.5-turbo": 4096,
3131
"gpt-3.5-turbo-16k": 16384,
3232
}
33-
gpt_4_supported = os.getenv("GPT_4_SUPPORTED") == "true"
33+
config = read_config()
34+
gpt_4_supported = config["gpt_4_supported"]
3435

3536
# For some unknown reason, tiktoken often underestimates the token size by ~10%, so let's buffer
3637
token_size = int(token_size * 1.1)
@@ -121,3 +122,16 @@ def exec_and_get_output(command):
121122
if result.returncode != 0:
122123
raise Exception(f"Command '{' '.join(command)}' failed with error:\n{result.stderr}") # noqa: TRY002
123124
return result.stdout
125+
126+
127+
def read_config():
    """Return the parsed YAML config as a dict, or None when no config file exists."""
    if not read_config.CONFIG_FILE.exists():
        logger.debug(f"Config file {read_config.CONFIG_FILE} does not exist")
        return None

    logger.debug(f"Config file {read_config.CONFIG_FILE} exists")
    with Path(read_config.CONFIG_FILE).open("r") as f:
        return yaml.safe_load(f)


# Function attribute acts as the single source of truth for the config location.
read_config.CONFIG_FILE = Path(Path.home() / ".aicodebot.yaml")

requirements/requirements.in

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,6 @@ GitPython
1111
langchain
1212
loguru
1313
openai
14-
python-dotenv
14+
pyyaml
1515
rich
1616
tiktoken

requirements/requirements.txt

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -79,10 +79,10 @@ pydantic==1.10.9
7979
# openapi-schema-pydantic
8080
pygments==2.15.1
8181
# via rich
82-
python-dotenv==1.0.0
83-
# via -r requirements.in
8482
pyyaml==6.0
85-
# via langchain
83+
# via
84+
# -r requirements.in
85+
# langchain
8686
regex==2023.6.3
8787
# via tiktoken
8888
requests==2.31.0

0 commit comments

Comments
 (0)