import json

from dotenv import load_dotenv
from humanlayer import HumanLayer
from openai import OpenAI

load_dotenv()

hl = HumanLayer(
    verbose=True,
    run_id="openai-math-example",
)


def add(x: int, y: int) -> int:
    """Add two numbers together."""
    return x + y


# multiply requires human approval via HumanLayer before it will run
@hl.require_approval()
def multiply(x: int, y: int) -> int:
    """Multiply two numbers together."""
    return x * y


math_tools = [
    {
        "type": "function",
        "function": {
            "name": "multiply",
            "description": "multiply two numbers",
            "parameters": {
                "type": "object",
                "properties": {
                    "x": {"type": "number"},
                    "y": {"type": "number"},
                },
                "required": ["x", "y"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "add",
            "description": "add two numbers",
            "parameters": {
                "type": "object",
                "properties": {
                    "x": {"type": "number"},
                    "y": {"type": "number"},
                },
                "required": ["x", "y"],
            },
        },
    },
]


def run_chain(prompt: str, tools: list[dict], tools_map: dict) -> str:
    client = OpenAI()
    messages = [{"role": "user", "content": prompt}]
    response = client.chat.completions.create(
        model="gpt-4",
        messages=messages,
        tools=tools,
        tool_choice="auto",
    )

    # Keep looping until the model returns a final answer instead of tool calls.
    while response.choices[0].finish_reason != "stop":
        response_message = response.choices[0].message
        tool_calls = response_message.tool_calls
        if tool_calls:
            # Keep the assistant message that requested the tool calls in the
            # history so the tool results below can reference its tool_call ids.
            messages.append(response_message)
            for tool_call in tool_calls:
                function_name = tool_call.function.name
                function_to_call = tools_map[function_name]
                function_args = json.loads(tool_call.function.arguments)
                try:
                    # For multiply, this call waits for human approval via
                    # HumanLayer before running.
                    function_response = function_to_call(**function_args)
                    function_response_json = json.dumps(function_response)
                except Exception as e:
                    # Feed errors back to the model instead of crashing the chain.
                    function_response_json = json.dumps({"error": str(e)})
                messages.append(
                    {
                        "tool_call_id": tool_call.id,
                        "role": "tool",
                        "name": function_name,
                        "content": function_response_json,
                    }
                )
        # Send the tool results back to the model for the next step.
        response = client.chat.completions.create(
            model="gpt-4",
            messages=messages,
            tools=tools,
        )

    return response.choices[0].message.content
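

# Minimal usage sketch (the prompt and tools_map below are illustrative, not part
# of any library API): map each tool name in `math_tools` to its Python
# implementation, then run the chain. The multiply step will pause for human
# approval through HumanLayer.
if __name__ == "__main__":
    tools_map = {
        "add": add,
        "multiply": multiply,
    }

    result = run_chain(
        "multiply 2 and 5, then add 32 to the result",
        math_tools,
        tools_map,
    )
    print(result)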