Niansuh committed on
Commit
2991a67
·
verified ·
1 Parent(s): 7b3b747

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +147 -161
app.py CHANGED
@@ -1,163 +1,149 @@
1
- import os
2
- from dotenv import find_dotenv, load_dotenv
3
  import streamlit as st
4
- from typing import Generator
5
- from groq import Groq
6
-
7
- _ = load_dotenv(find_dotenv())
8
- st.set_page_config(page_icon="πŸ“ƒ", layout="wide", page_title="Groq & LLaMA3 Chat Bot...")
9
-
10
- def icon(emoji: str):
11
- """Shows an emoji as a Notion-style page icon."""
12
- st.write(
13
- f'<span style="font-size: 78px; line-height: 1">{emoji}</span>',
14
- unsafe_allow_html=True,
15
- )
16
-
17
- # icon("⚑️")
18
-
19
- st.subheader("Groq Chat with LLaMA3 App", divider="rainbow", anchor=False)
20
-
21
- # Create a settings page
22
- def settings_page():
23
- st.title("Settings")
24
- api_keys = st.text_input("Enter your API keys (comma-separated):")
25
- if st.button("Save"):
26
- os.environ['GROQ_API_KEYS'] = api_keys
27
- st.success("API keys saved successfully!")
28
-
29
- # Create a main page
30
- def main_page():
31
- # Get the API keys from the environment variable
32
- api_keys = os.environ.get('GROQ_API_KEYS')
33
- if api_keys is None:
34
- st.error("Please set your API keys in the settings page.")
35
- return
36
-
37
- # Initialize the Groq client with the first API key
38
- client = None
39
- for api_key in api_keys.split(','):
40
- try:
41
- client = Groq(api_key=api_key)
42
- break
43
- except Exception as e:
44
- st.error(f"Failed to initialize client with API key {api_key}: {e}")
45
- continue
46
-
47
- if client is None:
48
- st.error("Failed to initialize client with any API key")
49
- st.stop()
50
-
51
- # Initialize chat history and selected model
52
- if "messages" not in st.session_state:
53
- st.session_state.messages = []
54
-
55
- if "selected_model" not in st.session_state:
56
- st.session_state.selected_model = None
57
-
58
- # Define model details
59
- models = {
60
- "llama3-70b-8192": {"name": "LLaMA3-70b", "tokens": 8192, "developer": "Meta"},
61
- "llama3-8b-8192": {"name": "LLaMA3-8b", "tokens": 8192, "developer": "Meta"},
62
- "llama2-70b-4096": {"name": "LLaMA2-70b-chat", "tokens": 4096, "developer": "Meta"},
63
- "gemma-7b-it": {"name": "Gemma-7b-it", "tokens": 8192, "developer": "Google"},
64
- "mixtral-8x7b-32768": {
65
- "name": "Mixtral-8x7b-Instruct-v0.1",
66
- "tokens": 32768,
67
- "developer": "Mistral",
68
- },
69
- }
70
-
71
- # Layout for model selection and max_tokens slider
72
- col1, col2 = st.columns([1, 3]) # Adjust the ratio to make the first column smaller
73
-
74
- with col1:
75
- model_option = st.selectbox(
76
- "Choose a model:",
77
- options=list(models.keys()),
78
- format_func=lambda x: models[x]["name"],
79
- index=0, # Default to the first model in the list
80
- )
81
- max_tokens_range = models[model_option]["tokens"]
82
- max_tokens = st.slider(
83
- "Max Tokens:",
84
- min_value=512,
85
- max_value=max_tokens_range,
86
- value=min(32768, max_tokens_range),
87
- step=512,
88
- help=f"Adjust the maximum number of tokens (words) for the model's response. Max for selected model: {max_tokens_range}",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
  )
90
- system_message = {}
91
- if system_prompt := st.text_input("System Prompt"):
92
- system_message = {"role": "system", "content": system_prompt}
93
-
94
- # Detect model change and clear chat history if model has changed
95
- if st.session_state.selected_model != model_option:
96
- st.session_state.messages = []
97
- st.session_state.selected_model = model_option
98
-
99
- # Add a "Clear Chat" button
100
- if st.button("Clear Chat"):
101
- st.session_state.messages = []
102
-
103
- # Display chat messages from history on app rerun
104
- for message in st.session_state.messages:
105
- avatar = "πŸ”‹" if message["role"] == "assistant" else "πŸ§‘β€πŸ’»"
106
- with st.chat_message(message["role"], avatar=avatar):
107
- st.markdown(message["content"])
108
-
109
- def generate_chat_responses(chat_completion) -> Generator[str, None, None]:
110
- """Yield chat response content from the Groq API response."""
111
- for chunk in chat_completion:
112
- if chunk.choices[0].delta.content:
113
- yield chunk.choices[0].delta.content
114
-
115
- if prompt := st.chat_input("Enter your prompt here..."):
116
- st.session_state.messages.append({"role": "user", "content": prompt})
117
-
118
- with st.chat_message("user", avatar="πŸ§‘β€πŸ’»"):
119
- st.markdown(prompt)
120
-
121
- messages=[
122
- {"role": m["role"], "content": m["content"]}
123
- for m in st.session_state.messages]
124
- if system_message:
125
- messages.insert(0,system_message)
126
- # Fetch response from Groq API
127
- try:
128
- chat_completion = client.chat.completions.create(
129
- model=model_option,
130
- messages=messages,
131
- max_tokens=max_tokens,
132
- stream=True,
133
- )
134
-
135
- # Use the generator function with st.write_stream
136
- with st.chat_message("assistant", avatar="πŸ”‹"):
137
- chat_responses_generator = generate_chat_responses(chat_completion)
138
- full_response = st.write_stream(chat_responses_generator)
139
- except Exception as e:
140
- st.error(e, icon="❌")
141
-
142
- # Append the full response to session_state.messages
143
- if isinstance(full_response, str):
144
- st.session_state.messages.append(
145
- {"role": "assistant", "content": full_response}
146
- )
147
- else:
148
- # Handle the case where full_response is not a string
149
- combined_response = "\n".join(str(item) for item in full_response)
150
- st.session_state.messages.append(
151
- {"role": "assistant", "content": combined_response}
152
- )
153
-
154
- # Create a sidebar with a settings button
155
- with st.sidebar:
156
- if st.button("Settings"):
157
- settings_page()
158
- else:
159
- main_page()
160
-
161
- # If the user is not in the settings page, show the main page
162
- if not st.sidebar.button("Settings"):
163
- main_page()
 
1
+ import numpy as np
 
2
  import streamlit as st
3
+ from openai import OpenAI
4
+ import os
5
+ import json
6
+ from dotenv import load_dotenv
7
+
8
+ load_dotenv()
9
+
10
+ # Initialize the client
11
+ client = OpenAI(
12
+ base_url=os.environ.get('BASE_URL'), # Fetch base_url from environment variables
13
+ api_key=os.environ.get('API_KEY') # Fetch API key from environment variables
14
+ )
15
+
16
+ # Create supported models
17
+ model_links = {
18
+ "Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
19
+ }
20
+
21
+ # Random dog images for error message
22
+ random_dog = [
23
+ "0f476473-2d8b-415e-b944-483768418a95.jpg",
24
+ "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
25
+ "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
26
+ "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
27
+ "42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
28
+ "8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
29
+ "ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
30
+ "027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
31
+ "08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
32
+ "0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
33
+ "0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
34
+ "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
35
+ "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"
36
+ ]
37
+
38
+ history_file = 'chat_histories.json'
39
+
40
+ def load_history():
41
+ if os.path.exists(history_file):
42
+ with open(history_file, 'r') as f:
43
+ return json.load(f)
44
+ return {}
45
+
46
+ def save_history(histories):
47
+ with open(history_file, 'w') as f:
48
+ json.dump(histories, f)
49
+
50
+ def reset_conversation():
51
+ '''
52
+ Resets Conversation
53
+ '''
54
+ st.session_state.messages = []
55
+ st.session_state.current_chat_name = None
56
+ return None
57
+
58
+ # Set up the Streamlit page configuration
59
+ st.set_page_config(page_icon="πŸ“ƒ", layout="wide", page_title="GPT-CHATBOT.ru")
60
+
61
+ # Display the header
62
+ st.title("GPT-CHATBOT.ru")
63
+
64
+ # Initialize session state attributes
65
+ if "messages" not in st.session_state:
66
+ st.session_state.messages = []
67
+
68
+ if "chat_histories" not in st.session_state:
69
+ st.session_state.chat_histories = load_history()
70
+
71
+ if "current_chat_name" not in st.session_state:
72
+ st.session_state.current_chat_name = None
73
+
74
+ # Define the available models
75
+ models = [key for key in model_links.keys()]
76
+
77
+ # Create the sidebar with the dropdown for model selection
78
+ selected_model = st.sidebar.selectbox("Select Model", models)
79
+
80
+ # Create a temperature slider
81
+ temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)
82
+
83
+ # Add reset button to clear conversation
84
+ st.sidebar.button('Reset Chat', on_click=reset_conversation)
85
+
86
+ # Create a chat history dropdown
87
+ chat_history = st.sidebar.selectbox("Select Chat History", ["Current Chat"] + list(st.session_state.chat_histories.keys()))
88
+
89
+ if chat_history != "Current Chat":
90
+ st.session_state.messages = st.session_state.chat_histories[chat_history]
91
+ else:
92
+ if selected_model not in st.session_state:
93
+ st.session_state[selected_model] = model_links[selected_model]
94
+
95
+ # Create a system prompt input
96
+ system_prompt = st.sidebar.text_input("System Prompt", value="", help="Optional system prompt for the chat model.")
97
+
98
+ # Display chat messages from history on app rerun
99
+ for message in st.session_state.messages:
100
+ avatar = "πŸ”‹" if message["role"] == "assistant" else "πŸ§‘β€πŸ’»"
101
+ with st.chat_message(message["role"], avatar=avatar):
102
+ st.markdown(message["content"])
103
+
104
+ # Accept user input
105
+ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
106
+ with st.chat_message("user", avatar="πŸ§‘β€πŸ’»"):
107
+ st.markdown(prompt)
108
+ st.session_state.messages.append({"role": "user", "content": prompt})
109
+
110
+ try:
111
+ # Construct the list of messages with an optional system prompt
112
+ messages = [{"role": m["role"], "content": m["content"]} for m in st.session_state.messages]
113
+ if system_prompt:
114
+ messages.insert(0, {"role": "system", "content": system_prompt})
115
+
116
+ # Make the API request
117
+ stream = client.chat.completions.create(
118
+ model=model_links[selected_model],
119
+ messages=messages,
120
+ temperature=temp_values,
121
+ stream=True,
122
+ max_tokens=3000,
123
  )
124
+ response = st.write_stream(stream)
125
+
126
+ except Exception as e:
127
+ response = "πŸ˜΅β€πŸ’« Looks like someone unplugged something!\
128
+ \n Either the model space is being updated or something is down.\
129
+ \n\
130
+ \n Try again later. \
131
+ \n\
132
+ \n Here's a random pic of a 🐢:"
133
+ st.write(response)
134
+ random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
135
+ st.image(random_dog_pick)
136
+ st.write("This was the error message:")
137
+ st.write(e)
138
+
139
+ st.session_state.messages.append({"role": "assistant", "content": response})
140
+
141
+ # Automatically name and save chat history
142
+ if not st.session_state.current_chat_name:
143
+ st.session_state.current_chat_name = f"Chat_{len(st.session_state.chat_histories) + 1}"
144
+ st.session_state.chat_histories[st.session_state.current_chat_name] = st.session_state.messages
145
+ save_history(st.session_state.chat_histories)
146
+
147
+ st.sidebar.write(f"You're now chatting with **{selected_model}**")
148
+ st.sidebar.markdown("*Generated content may be inaccurate or false.*")
149
+ st.sidebar.markdown("\n[TypeGPT](https://typegpt.net).")