Update app.py
app.py CHANGED
@@ -1,163 +1,149 @@
-import
-from dotenv import find_dotenv, load_dotenv
 import streamlit as st
-from
-#
-st.
-    st.session_state.messages.append({"role": "user", "content": prompt})
-
-    with st.chat_message("user", avatar="🧑‍💻"):
-        st.markdown(prompt)
-
-    messages=[
-        {"role": m["role"], "content": m["content"]}
-        for m in st.session_state.messages]
-    if system_message:
-        messages.insert(0,system_message)
-    # Fetch response from Groq API
-    try:
-        chat_completion = client.chat.completions.create(
-            model=model_option,
-            messages=messages,
-            max_tokens=max_tokens,
-            stream=True,
-        )
-
-        # Use the generator function with st.write_stream
-        with st.chat_message("assistant", avatar="π"):
-            chat_responses_generator = generate_chat_responses(chat_completion)
-            full_response = st.write_stream(chat_responses_generator)
-    except Exception as e:
-        st.error(e, icon="❌")
-
-    # Append the full response to session_state.messages
-    if isinstance(full_response, str):
-        st.session_state.messages.append(
-            {"role": "assistant", "content": full_response}
-        )
-    else:
-        # Handle the case where full_response is not a string
-        combined_response = "\n".join(str(item) for item in full_response)
-        st.session_state.messages.append(
-            {"role": "assistant", "content": combined_response}
-        )
-
-# Create a sidebar with a settings button
-with st.sidebar:
-    if st.button("Settings"):
-        settings_page()
-    else:
-        main_page()
-
-# If the user is not in the settings page, show the main page
-if not st.sidebar.button("Settings"):
-    main_page()
+import numpy as np
 import streamlit as st
+from openai import OpenAI
+import os
+import json
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# Initialize the client
+client = OpenAI(
+    base_url=os.environ.get('BASE_URL'),  # Fetch base_url from environment variables
+    api_key=os.environ.get('API_KEY')  # Fetch API key from environment variables
+)
+
+# Create supported models
+model_links = {
+    "Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+}
+
+# Random dog images for error message
+random_dog = [
+    "0f476473-2d8b-415e-b944-483768418a95.jpg",
+    "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
+    "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
+    "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
+    "42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
+    "8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
+    "ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
+    "027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
+    "08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
+    "0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
+    "0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
+    "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
+    "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"
+]
+
+history_file = 'chat_histories.json'
+
+def load_history():
+    if os.path.exists(history_file):
+        with open(history_file, 'r') as f:
+            return json.load(f)
+    return {}
+
+def save_history(histories):
+    with open(history_file, 'w') as f:
+        json.dump(histories, f)
+
+def reset_conversation():
+    '''
+    Resets Conversation
+    '''
+    st.session_state.messages = []
+    st.session_state.current_chat_name = None
+    return None
+
+# Set up the Streamlit page configuration
+st.set_page_config(page_icon="π", layout="wide", page_title="GPT-CHATBOT.ru")
+
+# Display the header
+st.title("GPT-CHATBOT.ru")
+
+# Initialize session state attributes
+if "messages" not in st.session_state:
+    st.session_state.messages = []
+
+if "chat_histories" not in st.session_state:
+    st.session_state.chat_histories = load_history()
+
+if "current_chat_name" not in st.session_state:
+    st.session_state.current_chat_name = None
+
+# Define the available models
+models = [key for key in model_links.keys()]
+
+# Create the sidebar with the dropdown for model selection
+selected_model = st.sidebar.selectbox("Select Model", models)
+
+# Create a temperature slider
+temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
+
+# Add reset button to clear conversation
+st.sidebar.button('Reset Chat', on_click=reset_conversation)
+
+# Create a chat history dropdown
+chat_history = st.sidebar.selectbox("Select Chat History", ["Current Chat"] + list(st.session_state.chat_histories.keys()))
+
+if chat_history != "Current Chat":
+    st.session_state.messages = st.session_state.chat_histories[chat_history]
+else:
+    if selected_model not in st.session_state:
+        st.session_state[selected_model] = model_links[selected_model]
+
+# Create a system prompt input
+system_prompt = st.sidebar.text_input("System Prompt", value="", help="Optional system prompt for the chat model.")
+
+# Display chat messages from history on app rerun
+for message in st.session_state.messages:
+    avatar = "π" if message["role"] == "assistant" else "🧑‍💻"
+    with st.chat_message(message["role"], avatar=avatar):
+        st.markdown(message["content"])
+
+# Accept user input
+if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
+    with st.chat_message("user", avatar="🧑‍💻"):
+        st.markdown(prompt)
+    st.session_state.messages.append({"role": "user", "content": prompt})
+
+    try:
+        # Construct the list of messages with an optional system prompt
+        messages = [{"role": m["role"], "content": m["content"]} for m in st.session_state.messages]
+        if system_prompt:
+            messages.insert(0, {"role": "system", "content": system_prompt})
+
+        # Make the API request
+        stream = client.chat.completions.create(
+            model=model_links[selected_model],
+            messages=messages,
+            temperature=temp_values,
+            stream=True,
+            max_tokens=3000,
         )
+        response = st.write_stream(stream)
+
+    except Exception as e:
+        response = "😵‍💫 Looks like someone unplugged something!\
+            \n Either the model space is being updated or something is down.\
+            \n\
+            \n Try again later. \
+            \n\
+            \n Here's a random pic of a 🐶:"
+        st.write(response)
+        random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
+        st.image(random_dog_pick)
+        st.write("This was the error message:")
+        st.write(e)
+
+    st.session_state.messages.append({"role": "assistant", "content": response})
+
+    # Automatically name and save chat history
+    if not st.session_state.current_chat_name:
+        st.session_state.current_chat_name = f"Chat_{len(st.session_state.chat_histories) + 1}"
+    st.session_state.chat_histories[st.session_state.current_chat_name] = st.session_state.messages
+    save_history(st.session_state.chat_histories)
+
+st.sidebar.write(f"You're now chatting with **{selected_model}**")
+st.sidebar.markdown("*Generated content may be inaccurate or false.*")
+st.sidebar.markdown("\n[TypeGPT](https://typegpt.net).")
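
The rewritten app.py now reads its OpenAI-compatible endpoint and credentials from the environment: load_dotenv() pulls them from a .env file, and the client is built from BASE_URL and API_KEY. Below is a minimal pre-flight sketch (the check_env.py name is illustrative; it assumes only the two variable names used by the client initialization above and a .env file in the working directory) for confirming the variables are visible before launching the app with streamlit run app.py.

# check_env.py -- illustrative sketch; BASE_URL and API_KEY are the variable
# names used by the client initialization in app.py above.
import os

from dotenv import load_dotenv

load_dotenv()  # reads a .env file from the current directory, if one exists

for name in ("BASE_URL", "API_KEY"):
    # Report whether each required variable is set, without echoing its value.
    print(f"{name}: {'set' if os.environ.get(name) else 'MISSING'}")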