utils.py
import base64
import json
import os
from datetime import datetime

import streamlit as st
from dotenv import load_dotenv
from openai import OpenAI

# Load the API key from .env (expects an OPEN_API_KEY entry).
load_dotenv()
api_key = os.getenv("OPEN_API_KEY")
client = OpenAI(api_key=api_key)


def get_answer(messages, tools=None):
    """Send the conversation to the model and return the assistant message."""
    system_message = [{
        "role": "system",
        "content": (
            "You are a Google Assistant helper. Execute the instructions the "
            "user gives you; you can guess preferences without asking the user. "
            "Just explain your choices after having finished. "
            f"Current time: {datetime.now().strftime('%Y-%m-%d %H:%M')}"
        ),
    }]
    messages = system_message + messages
    # Only pass `tools` when some are provided, to avoid sending an explicit null.
    kwargs = {"tools": tools} if tools else {}
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=messages,
        **kwargs,
    )
    return response.choices[0].message
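

# Illustrative sketch (not part of the original module): a plain call without
# tools. get_answer returns the assistant message object, so `.content`
# holds the reply text.
def _example_plain_answer():
    message = get_answer([{"role": "user", "content": "Say hello."}])
    return message.content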


def execute_tool_calls(service, tool_calls, function_dict):
    """Run each requested tool and collect the results as `tool` messages."""
    tool_results = []
    for tool_call in tool_calls:
        function_name = tool_call.function.name
        arguments = json.loads(tool_call.function.arguments)
        if function_name in function_dict:
            # Call the matching function with the model-provided arguments.
            result = function_dict[function_name](service, **arguments)
        else:
            # Every tool call needs a response, even for an unknown function,
            # or the follow-up chat request will be rejected.
            result = f"Unknown function: {function_name}"
        tool_results.append({
            "tool_call_id": tool_call.id,
            "role": "tool",
            "name": function_name,
            "content": str(result),
        })
    return tool_results
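

# Illustrative sketch: the shape of `function_dict` that execute_tool_calls
# expects, mapping tool names to callables that take `service` first. The
# `list_events` helper and its behavior are hypothetical placeholders.
def _example_function_dict():
    def list_events(service, max_results=10):
        # A real implementation would query the Google Calendar API via `service`.
        return f"Would list up to {max_results} events."

    return {"list_events": list_events}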


def ask_chatgpt_with_tools(service, messages, function_dict, tools, verbose=False):
    """Chat with the model, executing requested tools until it stops asking."""
    message = get_answer(messages, tools=tools)
    messages.append(message)
    # Keep executing tools and re-asking while the model requests them.
    while message.tool_calls:
        tool_results = execute_tool_calls(service, message.tool_calls, function_dict)
        messages.extend(tool_results)
        message = get_answer(messages, tools=tools)
        messages.append(message)
    if verbose:
        for entry in messages:
            print(entry)
    return message.content
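

# Illustrative sketch: wiring a tool schema (OpenAI function-calling format)
# to ask_chatgpt_with_tools. The `list_events` tool and the `service` object
# are hypothetical; they mirror the _example_function_dict sketch above.
def _example_ask_with_tools(service):
    tools = [{
        "type": "function",
        "function": {
            "name": "list_events",
            "description": "List upcoming calendar events.",
            "parameters": {
                "type": "object",
                "properties": {"max_results": {"type": "integer"}},
                "required": [],
            },
        },
    }]
    messages = [{"role": "user", "content": "What is on my calendar today?"}]
    return ask_chatgpt_with_tools(service, messages, _example_function_dict(), tools)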


def speech_to_text(audio_data):
    """Transcribe an audio file with Whisper; returns the transcript as text."""
    with open(audio_data, "rb") as audio_file:
        transcript = client.audio.transcriptions.create(
            model="whisper-1",
            response_format="text",
            file=audio_file,
        )
    return transcript
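

# Illustrative sketch: with response_format="text" the transcription comes
# back as a plain string rather than a JSON object. The path is a placeholder.
def _example_transcribe():
    return speech_to_text("recording.mp3")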


def text_to_speech(input_text):
    """Synthesize speech for the given text and save it to a temporary MP3."""
    response = client.audio.speech.create(
        model="tts-1",
        voice="nova",
        input=input_text,
    )
    # stream_to_file writes the audio bytes itself; no separate open() is needed.
    mp3_file_path = "temp_audio_play.mp3"
    response.stream_to_file(mp3_file_path)
    return mp3_file_path


def autoplay_audio(file_path: str):
    """Embed a base64-encoded audio tag so the browser auto-plays the file."""
    with open(file_path, "rb") as f:
        data = f.read()
    b64 = base64.b64encode(data).decode("utf-8")
    md = f"""
    <audio autoplay>
    <source src="data:audio/mp3;base64,{b64}" type="audio/mp3">
    </audio>
    """
    st.markdown(md, unsafe_allow_html=True)
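

# Illustrative sketch: a minimal voice-reply flow inside a Streamlit app
# (run with `streamlit run ...`). The reply text is a placeholder; the temp
# MP3 can be removed once its bytes have been embedded in the page.
def _example_speak_reply():
    reply = "Here is your schedule for today."
    audio_path = text_to_speech(reply)
    autoplay_audio(audio_path)
    os.remove(audio_path)  # autoplay_audio already embedded the bytes inline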