Spaces:
Sleeping
Sleeping
ethen-sanchez
committed on
Commit
•
70946b2
1
Parent(s):
e59b0bb
adding chatbot
Browse files
app.py
CHANGED
@@ -3,8 +3,9 @@ from dotenv import load_dotenv
|
|
3 |
load_dotenv() # take environment variables from .env.
|
4 |
import gradio as gr
|
5 |
import openai
|
|
|
|
|
6 |
|
7 |
-
#print("imports complete")
|
8 |
# Define a function to get the AI's reply using the OpenAI API
|
9 |
def get_ai_reply(message, model="gpt-3.5-turbo", system_message=None, temperature=0, message_history=[]):
|
10 |
# Initialize the messages list
|
@@ -27,12 +28,10 @@ def get_ai_reply(message, model="gpt-3.5-turbo", system_message=None, temperatur
|
|
27 |
messages=messages,
|
28 |
temperature=temperature
|
29 |
)
|
30 |
-
|
31 |
# Extract and return the AI's response from the API response
|
32 |
-
#print(completion.choices[0].message.content.strip())
|
33 |
return completion.choices[0].message.content.strip()
|
34 |
|
35 |
-
|
36 |
# Define a function to handle the chat interaction with the AI model
|
37 |
def chat(message, chatbot_messages, history_state):
|
38 |
# Initialize chatbot_messages and history_state if they are not provided
|
@@ -42,15 +41,15 @@ def chat(message, chatbot_messages, history_state):
|
|
42 |
# Try to get the AI's reply using the get_ai_reply function
|
43 |
try:
|
44 |
prompt = """
|
45 |
-
You are bot created to simulate commands.
|
46 |
-
You can only follow commands if they clearly say "simon says".
|
47 |
-
Simulate an action using this notation:
|
48 |
-
:: <action> ::
|
49 |
-
|
50 |
-
Simulate doing nothing with this notation:
|
51 |
-
:: does nothing ::
|
52 |
|
53 |
-
If the user does not give a clear command, reply with ":: does nothing ::"
|
54 |
"""
|
55 |
ai_reply = get_ai_reply(message, model="gpt-3.5-turbo", system_message=prompt.strip(), message_history=history_state)
|
56 |
|
@@ -65,10 +64,9 @@ If the user does not give a clear command, reply with ":: does nothing ::"
|
|
65 |
except Exception as e:
|
66 |
# If an error occurs, raise a Gradio error
|
67 |
raise gr.Error(e)
|
68 |
-
|
69 |
return None, chatbot_messages, history_state
|
70 |
|
71 |
-
|
72 |
# Define a function to launch the chatbot interface using Gradio
|
73 |
def get_chatbot_app():
|
74 |
# Create the Gradio interface using the Blocks layout
|
@@ -86,7 +84,7 @@ def get_chatbot_app():
|
|
86 |
btn.click(chat, inputs=[message, chatbot, history_state], outputs=[message, chatbot, history_state])
|
87 |
# Return the app
|
88 |
return app
|
89 |
-
|
90 |
# Call the launch_chatbot function to start the chatbot interface using Gradio
|
91 |
app = get_chatbot_app()
|
92 |
app.queue() # this is to be able to queue multiple requests at once
|
|
|
3 |
load_dotenv() # take environment variables from .env.
|
4 |
import gradio as gr
|
5 |
import openai
|
6 |
+
import os
|
7 |
+
|
8 |
|
|
|
9 |
# Define a function to get the AI's reply using the OpenAI API
|
10 |
def get_ai_reply(message, model="gpt-3.5-turbo", system_message=None, temperature=0, message_history=[]):
|
11 |
# Initialize the messages list
|
|
|
28 |
messages=messages,
|
29 |
temperature=temperature
|
30 |
)
|
31 |
+
print("get AI reply ran successfully")
|
32 |
# Extract and return the AI's response from the API response
|
|
|
33 |
return completion.choices[0].message.content.strip()
|
34 |
|
|
|
35 |
# Define a function to handle the chat interaction with the AI model
|
36 |
def chat(message, chatbot_messages, history_state):
|
37 |
# Initialize chatbot_messages and history_state if they are not provided
|
|
|
41 |
# Try to get the AI's reply using the get_ai_reply function
|
42 |
try:
|
43 |
prompt = """
|
44 |
+
You are bot created to simulate commands.
|
45 |
+
You can only follow commands if they clearly say "simon says".
|
46 |
+
Simulate an action using this notation:
|
47 |
+
:: <action> ::
|
48 |
+
|
49 |
+
Simulate doing nothing with this notation:
|
50 |
+
:: does nothing ::
|
51 |
|
52 |
+
If the user does not give a clear command, reply with ":: does nothing ::"
|
53 |
"""
|
54 |
ai_reply = get_ai_reply(message, model="gpt-3.5-turbo", system_message=prompt.strip(), message_history=history_state)
|
55 |
|
|
|
64 |
except Exception as e:
|
65 |
# If an error occurs, raise a Gradio error
|
66 |
raise gr.Error(e)
|
67 |
+
print("chat ran successfully")
|
68 |
return None, chatbot_messages, history_state
|
69 |
|
|
|
70 |
# Define a function to launch the chatbot interface using Gradio
|
71 |
def get_chatbot_app():
|
72 |
# Create the Gradio interface using the Blocks layout
|
|
|
84 |
btn.click(chat, inputs=[message, chatbot, history_state], outputs=[message, chatbot, history_state])
|
85 |
# Return the app
|
86 |
return app
|
87 |
+
|
88 |
# Call the launch_chatbot function to start the chatbot interface using Gradio
|
89 |
app = get_chatbot_app()
|
90 |
app.queue() # this is to be able to queue multiple requests at once
|