Eric Michael Martinez committed
Commit 6731167 · 1 Parent(s): 26750af

adding chatbot

Files changed (2)
  1. app.py +130 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,130 @@
+ import gradio as gr
+ import openai
+ import examples as chatbot_examples
+ from dotenv import load_dotenv
+ import os
+
+ load_dotenv()  # take environment variables from .env.
+
+ # To authenticate, the credential secrets must be set and the user-supplied credentials must match them
+ def auth(username, password):
+     app_username = os.getenv("APP_USERNAME")
+     app_password = os.getenv("APP_PASSWORD")
+
+     if app_username and app_password:
+         if username == app_username and password == app_password:
+             print("Logged in successfully.")
+             return True
+         else:
+             print("Username or password does not match.")
+     else:
+         print("Credential secrets not set.")
+     return False
+
+ # Define a function to get the AI's reply using the OpenAI API
+ def get_ai_reply(message, model="gpt-3.5-turbo", system_message=None, temperature=0, message_history=[]):
+     # Initialize the messages list
+     messages = []
+
+     # Add the system message to the messages list
+     if system_message is not None:
+         messages += [{"role": "system", "content": system_message}]
+
+     # Add the message history to the messages list
+     if message_history is not None:
+         messages += message_history
+
+     # Add the user's message to the messages list
+     messages += [{"role": "user", "content": message}]
+
+     # Make an API call to the OpenAI ChatCompletion endpoint with the model and messages
+     completion = openai.ChatCompletion.create(
+         model=model,
+         messages=messages,
+         temperature=temperature
+     )
+
+     # Extract and return the AI's response from the API response
+     return completion.choices[0].message.content.strip()
+
+ # Define a function to handle the chat interaction with the AI model
+ def chat(model, system_message, message, chatbot_messages, history_state):
+     # Initialize chatbot_messages and history_state if they are not provided
+     chatbot_messages = chatbot_messages or []
+     history_state = history_state or []
+
+     # Try to get the AI's reply using the get_ai_reply function
+     try:
+         ai_reply = get_ai_reply(message, model=model, system_message=system_message, message_history=history_state)
+
+         # Append the user's message and the AI's reply to the chatbot_messages list
+         chatbot_messages.append((message, ai_reply))
+
+         # Append the user's message and the AI's reply to the history_state list
+         history_state.append({"role": "user", "content": message})
+         history_state.append({"role": "assistant", "content": ai_reply})
+
+     except Exception as e:
+         # If an error occurs, raise a Gradio error
+         raise gr.Error(str(e))
+
+     # Return None (to empty the user's message textbox), the updated chatbot_messages, and the updated history_state
+     return None, chatbot_messages, history_state
+
+ # Define a function to build the chatbot interface using Gradio
+ def get_chatbot_app(additional_examples=[]):
+     # Load chatbot examples and merge with any additional examples provided
+     examples = chatbot_examples.load_examples(additional=additional_examples)
+
+     # Define a function to get the names of the examples
+     def get_examples():
+         return [example["name"] for example in examples]
+
+     # Define a function to choose an example based on the index
+     def choose_example(index):
+         if index is not None:
+             system_message = examples[index]["system_message"].strip()
+             user_message = examples[index]["message"].strip()
+             return system_message, user_message, [], []
+         else:
+             return "", "", [], []
+
+     # Create the Gradio interface using the Blocks layout
+     with gr.Blocks() as app:
+         with gr.Tab("Conversation"):
+             with gr.Row():
+                 with gr.Column():
+                     # Create a dropdown to select examples
+                     example_dropdown = gr.Dropdown(get_examples(), label="Examples", type="index")
+                     # Create a button to load the selected example
+                     example_load_btn = gr.Button(value="Load")
+                     # Create a textbox for the system message (prompt)
+                     system_message = gr.Textbox(label="System Message (Prompt)", value="You are a helpful assistant.")
+                 with gr.Column():
+                     # Create a dropdown to select the AI model
+                     model_selector = gr.Dropdown(
+                         ["gpt-3.5-turbo"],
+                         label="Model",
+                         value="gpt-3.5-turbo"
+                     )
+                     # Create a chatbot interface for the conversation
+                     chatbot = gr.Chatbot(label="Conversation")
+                     # Create a textbox for the user's message
+                     message = gr.Textbox(label="Message")
+                     # Create a state object to store the conversation history
+                     history_state = gr.State()
+                     # Create a button to send the user's message
+                     btn = gr.Button(value="Send")
+
+             # Connect the example load button to the choose_example function
+             example_load_btn.click(choose_example, inputs=[example_dropdown], outputs=[system_message, message, chatbot, history_state])
+             # Connect the send button to the chat function
+             btn.click(chat, inputs=[model_selector, system_message, message, chatbot, history_state], outputs=[message, chatbot, history_state])
+     # Return the app
+     return app
+
+ # Build the chatbot app and start the Gradio interface
+ # Authentication is required via the auth function defined above
+ app = get_chatbot_app()
+ app.queue()  # enable queuing so multiple requests can be handled at once
+ app.launch(auth=auth)
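
Note: app.py imports an examples module (used via chatbot_examples.load_examples) that is not part of this commit, so it presumably already exists in the repository. From the way it is used, load_examples(additional=...) is expected to return a list of dicts with "name", "system_message", and "message" keys. A minimal sketch of such a module, with purely hypothetical example content, might look like this:

def load_examples(additional=None):
    # Built-in examples; each "name" appears in the "Examples" dropdown in app.py.
    # The content below is illustrative only; the actual module may differ.
    examples = [
        {
            "name": "Helpful Assistant",
            "system_message": "You are a helpful assistant.",
            "message": "Hello! What can you do?"
        }
    ]
    # Merge in any additional examples supplied by the caller
    return examples + list(additional or [])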
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ gradio == 3.27.0
+ openai == 0.27.4
+ python-dotenv == 1.0.0
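
Note: app.py calls load_dotenv(), so local runs can keep secrets in a .env file (on Hugging Face Spaces the same values would be set as repository secrets). The auth function reads APP_USERNAME and APP_PASSWORD, and the openai 0.27 client picks up OPENAI_API_KEY from the environment when no key is set in code. A .env file for local testing might look like the following, with placeholder values:

OPENAI_API_KEY=<your OpenAI API key>
APP_USERNAME=<login username>
APP_PASSWORD=<login password>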