forked from humanlayer/humanlayer
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathllama_toolkit.py
More file actions
123 lines (96 loc) · 3.97 KB
/
llama_toolkit.py
File metadata and controls
123 lines (96 loc) · 3.97 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
import functools
import os
import json
from typing import Any, Callable, Dict
from langchain.agents import AgentType, initialize_agent
from langchain.callbacks.base import BaseCallbackHandler
from langchain.prompts import PromptTemplate
from langchain.schema import AgentAction
from langchain.tools import StructuredTool
from langchain_ollama import OllamaLLM
from dotenv import load_dotenv
load_dotenv()
class FunctionInvocationHandler(BaseCallbackHandler):
    """LangChain callback handler that prints tool calls, results, and agent thoughts.

    Hooks into the agent's callback system so every tool invocation is shown
    on stdout in a structured, human-readable form.
    """

    def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        **kwargs: Any,
    ) -> None:
        """Print the tool name and its arguments, one argument per line.

        ``input_str`` is usually a JSON-encoded argument mapping; when it is
        not valid JSON it is wrapped under a single ``"input"`` key instead.
        """
        tool_name = serialized.get("name", "unknown_tool")
        try:
            # Parse input and format it nicely
            if isinstance(input_str, str):
                try:
                    input_dict = json.loads(input_str)
                except json.JSONDecodeError:
                    # Not JSON -- treat the raw string as the sole argument.
                    input_dict = {"input": input_str}
            else:
                input_dict = input_str
            print(f"\nInvoking: `{tool_name}` with arguments:")
            for key, value in input_dict.items():
                print(f"  - {key}: {value}")
        except Exception:
            # Last-resort fallback, e.g. when the parsed value is not a
            # mapping and has no .items(): show the input unformatted.
            print(f"\nInvoking: `{tool_name}` with: {input_str}")

    def on_tool_end(self, output: str, **kwargs: Any) -> None:
        """Print the tool's raw output after it finishes."""
        print(f"Result: {output}\n")

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Print the agent's intermediate reasoning (the ReAct 'thought' log)."""
        print(f"Thought: {action.log}")
class LlamaToolkit:
    """Registry of Python functions exposed as LangChain tools for a Llama agent.

    Functions are registered with the :meth:`add_function` decorator, which
    wraps each one in a ``StructuredTool``; :meth:`create_agent` then wires
    the collected tools into a structured-chat ReAct agent backed by a local
    Ollama model.
    """

    def __init__(self, model_name: str = "llama3.1", temperature: float = 0.1):
        # model_name / temperature are forwarded verbatim to OllamaLLM.
        self.tools = []
        self.model_name = model_name
        self.temperature = temperature
        self.callback_handler = FunctionInvocationHandler()

    def add_function(self, name: str = None, description: str = None):
        """Decorator that registers a function as a tool on this toolkit.

        Supports both bare and parameterized usage::

            @toolkit.add_function
            def add(a: int, b: int): ...

            @toolkit.add_function(name="adder", description="Adds numbers")
            def add(a: int, b: int): ...

        ``name`` defaults to the function's ``__name__`` and ``description``
        to its docstring. The registered wrapper converts any exception into
        an error string so a failing tool never crashes the agent loop.
        """
        # Bare usage: the decorated function itself arrives in `name`.
        if callable(name):
            func, name = name, None
            return self.add_function()(func)

        def decorator(func: Callable):
            # Resolve per-registration so one toolkit instance can decorate
            # many functions without the defaults leaking between them.
            tool_name = name if name is not None else func.__name__
            tool_description = (
                description
                if description is not None
                else func.__doc__ or f"Tool that calls the {tool_name} function"
            )

            @functools.wraps(func)
            def wrapped_func(*args, **kwargs):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    # Surface failures to the model as text instead of raising.
                    return f"Error in {tool_name}: {str(e)}"

            tool = StructuredTool.from_function(
                func=wrapped_func, name=tool_name, description=tool_description, return_direct=False
            )
            self.tools.append(tool)
            return wrapped_func

        return decorator

    def create_agent(self):
        """Create a structured-chat Llama agent over the registered tools.

        Returns an ``AgentExecutor`` configured with this toolkit's callback
        handler, parsing-error recovery, and a 5-iteration cap.
        """
        llm = OllamaLLM(model=self.model_name, temperature=self.temperature)
        # Custom prompt template
        template = """you are math expert that can answer math questions.
        When performing tasks:
        1. If a suitable tool is available, you may use it
        2. If no tool is available, use your own intelligence to solve the problem
        3. Never refuse to help just because a tool is missing
        Available functions:
        {tools}
        Human: {input}
        Assistant: Let me help you solve this step by step.
        {agent_scratchpad}
        """
        prompt = PromptTemplate(input_variables=["tools", "input", "agent_scratchpad"], template=template)
        return initialize_agent(
            tools=self.tools,
            llm=llm,
            agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
            verbose=True,
            callbacks=[self.callback_handler],
            handle_parsing_errors=True,
            max_iterations=5,
            agent_kwargs={"prompt": prompt},
        )