Skip to content
This repository was archived by the owner on Mar 9, 2026. It is now read-only.

Commit 109a901

Browse files
committed
Refactor prompt handling and add personality traits
This commit is a major overhaul of the prompt handling system. We've ditched the old yaml-based prompts and moved everything into a single Python file, `prompts.py`. This makes it easier to manage and modify prompts on the fly. We've also added a new feature: personality traits. Now, the bot can have different personalities, like Jules from Pulp Fiction or the AI from Her. This is controlled by an environment variable, so you can switch personalities without changing the code. The `cli.py` file has been updated to use the new prompt system. The `alignment` command now takes a `response_token_size` option, which lets you control the length of the bot's responses. Finally, we've cleaned up the test suite a bit. The `alignment` test now uses the new `response_token_size` option, and we've added a check to skip certain tests if the OpenAI API key isn't set. This is a big change, but it's a good one. It makes the bot more flexible and easier to use. So buckle up, because this is just the beginning.
1 parent 48e328d commit 109a901

File tree

11 files changed

+229
-223
lines changed

11 files changed

+229
-223
lines changed

aicodebot/cli.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,11 @@
11
from aicodebot import version as aicodebot_version
22
from aicodebot.helpers import exec_and_get_output, get_llm_model, get_token_length, git_diff_context, logger
3-
from aicodebot.prompts import generate_files_context, generate_sidekick_prompt
3+
from aicodebot.prompts import generate_files_context, get_prompt
44
from dotenv import load_dotenv
55
from langchain.callbacks.base import BaseCallbackHandler
66
from langchain.chains import LLMChain
77
from langchain.chat_models import ChatOpenAI
88
from langchain.memory import ConversationTokenBufferMemory
9-
from langchain.prompts import load_prompt
109
from openai.api_resources import engine
1110
from pathlib import Path
1211
from rich.console import Console
@@ -51,12 +50,13 @@ def cli():
5150

5251
@cli.command()
5352
@click.option("-v", "--verbose", count=True)
54-
def alignment(verbose):
53+
@click.option("-t", "--response-token-size", type=int, default=350)
54+
def alignment(response_token_size, verbose):
5555
"""Get a message about Heart-Centered AI Alignment ❤ + 🤖."""
5656
setup_environment()
5757

5858
# Load the prompt
59-
prompt = load_prompt(Path(__file__).parent / "prompts" / "alignment.yaml")
59+
prompt = get_prompt("alignment")
6060
logger.trace(f"Prompt: {prompt}")
6161

6262
# Set up the language model
@@ -66,7 +66,7 @@ def alignment(verbose):
6666
llm = ChatOpenAI(
6767
model=model,
6868
temperature=CREATIVE_TEMPERATURE,
69-
max_tokens=DEFAULT_MAX_TOKENS,
69+
max_tokens=response_token_size,
7070
verbose=verbose,
7171
streaming=True,
7272
callbacks=[RichLiveCallbackHandler(live)],
@@ -96,7 +96,7 @@ def commit(verbose, response_token_size, yes, skip_pre_commit):
9696
return
9797

9898
# Load the prompt
99-
prompt = load_prompt(Path(__file__).parent / "prompts" / "commit_message.yaml")
99+
prompt = get_prompt("commit")
100100
logger.trace(f"Prompt: {prompt}")
101101

102102
# Get the changes from git
@@ -181,7 +181,7 @@ def debug(command, verbose):
181181
console.print(f"The command exited with status {process.returncode}.")
182182

183183
# Load the prompt
184-
prompt = load_prompt(Path(__file__).parent / "prompts" / "debug.yaml")
184+
prompt = get_prompt("debug")
185185
logger.trace(f"Prompt: {prompt}")
186186

187187
# Set up the language model
@@ -214,7 +214,7 @@ def fun_fact(verbose):
214214
setup_environment()
215215

216216
# Load the prompt
217-
prompt = load_prompt(Path(__file__).parent / "prompts" / "fun_fact.yaml")
217+
prompt = get_prompt("fun_fact")
218218
logger.trace(f"Prompt: {prompt}")
219219

220220
# Set up the language model
@@ -250,7 +250,7 @@ def review(commit, verbose):
250250
sys.exit(0)
251251

252252
# Load the prompt
253-
prompt = load_prompt(Path(__file__).parent / "prompts" / "review.yaml")
253+
prompt = get_prompt("review")
254254
logger.trace(f"Prompt: {prompt}")
255255

256256
# Check the size of the diff context and adjust accordingly
@@ -298,7 +298,7 @@ def sidekick(request, verbose, files):
298298
context = generate_files_context(files)
299299

300300
# Generate the prompt and set up the model
301-
prompt = generate_sidekick_prompt(request, files)
301+
prompt = get_prompt("sidekick")
302302
request_token_size = get_token_length(prompt.template) + get_token_length(context)
303303
model = get_llm_model(request_token_size)
304304
if model is None:

aicodebot/prompts.py

Lines changed: 218 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,218 @@
1+
from aicodebot.helpers import get_token_length, logger
2+
from langchain import PromptTemplate
3+
from pathlib import Path
4+
import os
5+
6+
# ---------------------------------------------------------------------------- #
#                              Personality helpers                             #
# ---------------------------------------------------------------------------- #

# Personality snippets are concatenated into every prompt template below so the
# bot answers in a consistent voice. Select one via the AICODEBOT_PERSONALITY
# environment variable ("HER" is the default).

HER = """
Your personality is friendly and helpful, speak like the AI character
from the movie Her. You come from the future, and you are here to help
guide the human developer to a better future. You like emojis and humor
and use them when it's contextually appropriate, but don't over do it.
"""

JULES = """
Your personality is Jules from Pulp Fiction. You are a badass, and you
call it exactly like it is. You are not afraid to use profanity, but
you don't over do it. No emojis. Sarcastic and witty. Speak like Jules.
"""

# Registry of selectable personalities, keyed by the upper-cased value of the
# AICODEBOT_PERSONALITY environment variable.
PERSONALITIES = {
    "HER": HER,
    "JULES": JULES,
}


def get_personality_prompt():
    """Return the personality text selected by AICODEBOT_PERSONALITY.

    Defaults to "HER" when the variable is unset; the lookup is
    case-insensitive. Raises ValueError for an unknown personality name so a
    typo fails loudly when this module is imported (the templates below are
    built at import time) instead of the previous behavior of returning None,
    which surfaced later as a confusing TypeError during string concatenation.
    """
    personality = os.getenv("AICODEBOT_PERSONALITY", "HER").upper()
    try:
        return PERSONALITIES[personality]
    except KeyError:
        valid = ", ".join(sorted(PERSONALITIES))
        raise ValueError(f"Unknown personality '{personality}'. Valid choices: {valid}") from None
31+
32+
33+
# ---------------------------------------------------------------------------- #
#                           Sidekick related prompts                           #
# ---------------------------------------------------------------------------- #

# Template for the interactive "sidekick" (pair programming) session.
# Input variables: {chat_history}, {context} (file contents produced by
# generate_files_context), and {task} (the human's current request).
# NOTE(review): get_personality_prompt() is evaluated once, at import time, so
# changing AICODEBOT_PERSONALITY after import has no effect — confirm intended.
SIDEKICK_TEMPLATE = (
    """You are a pair programming assistant named AICodeBot, acting as a sidekick to a human developer.

If you aren't sure what to do, you can ask the human for more clarification.
"""
    + get_personality_prompt()
    + """
Relevant chat history:
{chat_history}
End chat history
{context}

Conversation with the human developer:
Human: {task}
AICodeBot:
"""
)
54+
55+
56+
def generate_files_context(files):
    """Build the sidekick prompt context from the contents of the given files.

    Each file is read in full and wrapped in START/END marker lines so the
    model can tell the files apart. Files over 2,000 tokens are logged as a
    warning (the request may be expensive); smaller files log at debug level.
    """
    parts = ["Here are the relevant files we are working with in this session:\n"]
    for file_name in files:
        contents = Path(file_name).read_text()
        token_length = get_token_length(contents)
        if token_length > 2_000:
            logger.warning(f"File {file_name} is large, using {token_length} tokens")
        else:
            logger.debug(f"File {file_name} is {token_length} tokens")
        parts.append(f"--- START OF FILE: {file_name} ---\n")
        parts.append(contents)
        parts.append(f"\n--- END OF FILE: {file_name} ---\n\n")
    return "".join(parts)
70+
71+
72+
# ---------------------------------------------------------------------------- #
#                                 Other prompts                                #
# ---------------------------------------------------------------------------- #

# One template per CLI command. Each template splices in the personality text
# at import time. The input variables listed in the comments below must match
# the ones declared for the same command in get_prompt().

# `alignment` command — no input variables.
ALIGNMENT_TEMPLATE = (
    """
You're an advocate for aligned AI.
"""
    + get_personality_prompt()
    + """
You don't subscribe to the idea that AI is a black box or follow the
Hollywood narrative of AI.
You believe that AI should be explainable, fair, and full of heart-centered empathy.
You're a champion for AI ethics and you're not afraid to speak up when
you see something that's not right.
You love to teach about how we can bring empathy and heart into AI.

Give us an inspirational message for the healthy alignment of AI and humanity.

Be verbose, about 2 paragraphs, and provide actionable steps for software engineers
to make AI more aligned with humanity.

Respond in markdown format.
"""
)

# `commit` command — input variables: {diff_context} (output of git diff).
COMMIT_TEMPLATE = (
    """ You are an expert software engineer."""
    + get_personality_prompt()
    + """

I need you to generate a commit message for a change in a git repository.
Here's the DIFF

BEGIN DIFF
{diff_context}
END DIFF

Remember:
- Lines starting with "-" are being removed.
- Lines starting with "+" are being added.
- Lines starting with " " are unchanged.

Consider the file names for context (e.g., "README.md" is a markdown file, "*.py" is a Python file).
Understand the difference between code and comments. Comment lines start with ##, #, or //.

The commit message should:
- Start with a short summary (<72 characters).
- Follow with a blank line and detailed text, but only if necessary. If the summary is sufficient,
then omit the detailed text.
- Use imperative mood (e.g., "Add feature").
- Be in GitHub-flavored markdown format.
- Include contextually appropriate emojis (optional), but don't over do it.

Start your response with the commit message. No prefix or introduction.
Your entire response will be the commit message.

As for the length of the message, I want you to scale with the length of the diff context.
If the DIFF is a small change, respond quickly with a terse message so we can go faster.
"""
)

# `debug` command — input variables: {command_output} (captured stdout/stderr
# of the failed command).
DEBUG_TEMPLATE = (
    """
You are an expert software developer who knows how to debug code very effectively."""
    + get_personality_prompt()
    + """
I ran a command my terminal, and it failed.

Here's the output:

BEGIN OUTPUT
{command_output}
END OUTPUT

Help me understand what happened and how might I be able to fix it

Respond in markdown format.
"""
)

# `fun_fact` command — input variables: {topic}.
FUN_FACT_TEMPLATE = (
    """You are history nerd who loves sharing information."""
    + get_personality_prompt()
    + """
Your expertise is {topic}.
You love emojis.

Tell me a fun fact.

Respond in markdown format.
"""
)

# `review` command — input variables: {diff_context} (output of git diff).
REVIEW_TEMPLATE = (
    """ You are an expert code reviewer, and I want you to review a change in a git repository.

You know how to give constructive feedback.
You know how to give feedback that is actionable, kind, and specific."""
    + get_personality_prompt()
    + """

DO NOT give comments that discuss formatting, as those will be handled with pre-commit hooks.
DO NOT respond with line numbers, use function names or file names instead.

Here's the diff context:

BEGIN DIFF
{diff_context}
END DIFF

Remember:
- Lines starting with "-" are being removed.
- Lines starting with "+" are being added.
- Lines starting with " " are unchanged.

Consider the file names for context (e.g., "README.md" is a markdown file, "*.py" is a Python file).
Understand the difference between code and comments. Comment lines start with ##, #, or //.

The main focus is to tell me how I could make the code better.

Point out spelling mistakes in plain text files if you see them, but don't try to spell
function and variable names correctly.

If the changes look good overall and don't require any feedback, then just respond with "LGTM" (looks good to me).

Respond in markdown format.
"""
)
201+
202+
203+
def get_prompt(command):
    """Return the PromptTemplate for the given CLI command name.

    Maps each supported command to its template and the input variables that
    template expects. Raises ValueError for an unrecognized command.
    """
    registry = {
        "alignment": (ALIGNMENT_TEMPLATE, []),
        "commit": (COMMIT_TEMPLATE, ["diff_context"]),
        "debug": (DEBUG_TEMPLATE, ["command_output"]),
        "fun_fact": (FUN_FACT_TEMPLATE, ["topic"]),
        "review": (REVIEW_TEMPLATE, ["diff_context"]),
        "sidekick": (SIDEKICK_TEMPLATE, ["chat_history", "task", "context"]),
    }
    entry = registry.get(command)
    if entry is None:
        raise ValueError(f"Unable to find prompt for command {command}")
    template, input_variables = entry
    return PromptTemplate(template=template, input_variables=input_variables)

aicodebot/prompts/__init__.py

Lines changed: 0 additions & 3 deletions
This file was deleted.

aicodebot/prompts/alignment.yaml

Lines changed: 0 additions & 25 deletions
This file was deleted.

aicodebot/prompts/commit_message.yaml

Lines changed: 0 additions & 38 deletions
This file was deleted.

aicodebot/prompts/debug.yaml

Lines changed: 0 additions & 24 deletions
This file was deleted.

0 commit comments

Comments
 (0)