ogegadavis254 committed
Commit 9892e5a · verified · 1 Parent(s): 38b511f

Update app.py

Files changed (1): app.py (+39 −107)
app.py CHANGED
@@ -1,77 +1,37 @@
 """ Simple Chatbot
 @author: Nigel Gebodh
 @email: [email protected]
-
 """
+
 import numpy as np
 import streamlit as st
 from openai import OpenAI
 import os
-import sys
-from dotenv import load_dotenv, dotenv_values
-load_dotenv()
-
-
-
+from dotenv import load_dotenv
 
+load_dotenv()
 
 # initialize the client
 client = OpenAI(
-    base_url="https://api-inference.huggingface.co/v1",
-    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')#"hf_xxx" # Replace with your token
-)
-
-
-
-
-#Create supported models
-model_links ={
-    "Meta-Llama-3-8B":"meta-llama/Meta-Llama-3-8B-Instruct",
-    "Mistral-7B":"mistralai/Mistral-7B-Instruct-v0.2",
-    "Gemma-7B":"google/gemma-1.1-7b-it",
-    "Gemma-2B":"google/gemma-1.1-2b-it",
-    "Zephyr-7B-β":"HuggingFaceH4/zephyr-7b-beta",
+    base_url="https://api-inference.huggingface.co/v1",
+    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN') # Replace with your token
+)
 
+# Create supported model
+model_link = {
+    "Mistral-7B": "mistralai/Mistral-7B-Instruct-v0.2"
 }
 
-#Pull info about the model to display
-model_info ={
-    "Mistral-7B":
-        {'description':"""The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+# Pull info about the model to display
+model_info = {
+    "Mistral-7B": {
+        'description': """The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
         \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team as has over **7 billion parameters.** \n""",
-        'logo':'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
-    "Gemma-7B":
-        {'description':"""The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the [**Google's AI Team**](https://blog.google/technology/developers/gemma-open-models/) team as has over **7 billion parameters.** \n""",
-        'logo':'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
-    "Gemma-2B":
-        {'description':"""The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the [**Google's AI Team**](https://blog.google/technology/developers/gemma-open-models/) team as has over **2 billion parameters.** \n""",
-        'logo':'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
-    "Zephyr-7B":
-        {'description':"""The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nFrom Huggingface: \n\
-        Zephyr is a series of language models that are trained to act as helpful assistants. \
-        [Zephyr 7B Gemma](https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1)\
-        is the third model in the series, and is a fine-tuned version of google/gemma-7b \
-        that was trained on on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
-        'logo':'https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1/resolve/main/thumbnail.png'},
-    "Zephyr-7B-β":
-        {'description':"""The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nFrom Huggingface: \n\
-        Zephyr is a series of language models that are trained to act as helpful assistants. \
-        [Zephyr-7B-β](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta)\
-        is the second model in the series, and is a fine-tuned version of mistralai/Mistral-7B-v0.1 \
-        that was trained on on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
-        'logo':'https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha/resolve/main/thumbnail.png'},
-    "Meta-Llama-3-8B":
-        {'description':"""The Llama (3) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-        \nIt was created by the [**Meta's AI**](https://llama.meta.com/) team and has over **8 billion parameters.** \n""",
-        'logo':'Llama_logo.png'},
+        'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'
+    }
 }
 
-
-#Random dog images for error message
+# Random dog images for error message
 random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
               "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
               "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
@@ -86,8 +46,6 @@ random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
               "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
               "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"]
 
-
-
 def reset_conversation():
     '''
     Resets Conversation
@@ -95,71 +53,51 @@ def reset_conversation():
     st.session_state.conversation = []
     st.session_state.messages = []
     return None
-
-
-
-
-# Define the available models
-models =[key for key in model_links.keys()]
-
-# Create the sidebar with the dropdown for model selection
-selected_model = st.sidebar.selectbox("Select Model", models)
-
-#Create a temperature slider
-temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
-
-
-#Add reset button to clear conversation
-st.sidebar.button('Reset Chat', on_click=reset_conversation) #Reset button
-
 
 # Create model description
-st.sidebar.write(f"You're now chatting with **{selected_model}**")
-st.sidebar.markdown(model_info[selected_model]['description'])
-st.sidebar.image(model_info[selected_model]['logo'])
+st.sidebar.write(f"You're now chatting with **Mistral-7B**")
+st.sidebar.markdown(model_info["Mistral-7B"]['description'])
+st.sidebar.image(model_info["Mistral-7B"]['logo'])
 st.sidebar.markdown("*Generated content may be inaccurate or false.*")
 st.sidebar.markdown("\nLearn how to build this chatbot [here](https://ngebodh.github.io/projects/2024-03-05/).")
 st.sidebar.markdown("\nRun into issues? Try the [back-up](https://huggingface.co/spaces/ngebodh/SimpleChatbot-Backup).")
 
+#Create a temperature slider
+temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
 
+# Add reset button to clear conversation
+st.sidebar.button('Reset Chat', on_click=reset_conversation) # Reset button
 
-
+# Check if the previous option is in session state
 if "prev_option" not in st.session_state:
-    st.session_state.prev_option = selected_model
+    st.session_state.prev_option = "Mistral-7B"
 
-if st.session_state.prev_option != selected_model:
+# Reset conversation if the model changes
+if st.session_state.prev_option != "Mistral-7B":
     st.session_state.messages = []
-    # st.write(f"Changed to {selected_model}")
-    st.session_state.prev_option = selected_model
+    st.session_state.prev_option = "Mistral-7B"
     reset_conversation()
 
+# Pull in the model we want to use
+repo_id = model_link["Mistral-7B"]
 
-
-#Pull in the model we want to use
-repo_id = model_links[selected_model]
-
-
-st.subheader(f'AI - {selected_model}')
-# st.title(f'ChatBot Using {selected_model}')
+st.subheader('AI - Mistral-7B')
 
 # Set a default model
-if selected_model not in st.session_state:
-    st.session_state[selected_model] = model_links[selected_model]
+if "Mistral-7B" not in st.session_state:
+    st.session_state["Mistral-7B"] = model_link["Mistral-7B"]
 
 # Initialize chat history
 if "messages" not in st.session_state:
     st.session_state.messages = []
 
-
 # Display chat messages from history on app rerun
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])
 
-
-
 # Accept user input
-if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
+if prompt := st.chat_input("Hi I'm Mistral-7B, ask me a question"):
 
     # Display user message in chat message container
     with st.chat_message("user"):
@@ -167,26 +105,23 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
     # Add user message to chat history
     st.session_state.messages.append({"role": "user", "content": prompt})
 
-
     # Display assistant response in chat message container
     with st.chat_message("assistant"):
-
         try:
             stream = client.chat.completions.create(
-                model=model_links[selected_model],
+                model=model_link["Mistral-7B"],
                 messages=[
                     {"role": m["role"], "content": m["content"]}
                     for m in st.session_state.messages
                 ],
-                temperature=temp_values,#0.5,
+                temperature=temp_values,
                 stream=True,
                 max_tokens=3000,
            )
-
+
            response = st.write_stream(stream)
 
        except Exception as e:
-            # st.empty()
            response = "😵‍💫 Looks like someone unplugged something!\
                        \n Either the model space is being updated or something is down.\
                        \n\
@@ -194,12 +129,9 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
                        \n\
                        \n Here's a random pic of a 🐶:"
            st.write(response)
-            random_dog_pick = 'https://random.dog/'+ random_dog[np.random.randint(len(random_dog))]
+            random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
            st.image(random_dog_pick)
            st.write("This was the error message:")
            st.write(e)
 
-
-
-
-    st.session_state.messages.append({"role": "assistant", "content": response})
+    st.session_state.messages.append({"role": "assistant", "content": response})
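For reference, a minimal standalone sketch of the request this commit pins the Space to. It is illustrative, not part of the commit: the endpoint, model id, temperature default, and max_tokens are taken from app.py above, and it assumes HUGGINGFACEHUB_API_TOKEN is set in the environment (or in a local .env file that python-dotenv can read).

import os

from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()  # pick up HUGGINGFACEHUB_API_TOKEN from a local .env, if present

client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
)

# Same request shape the app builds, with a one-off prompt standing in for
# st.session_state.messages.
stream = client.chat.completions.create(
    model="mistralai/Mistral-7B-Instruct-v0.2",
    messages=[{"role": "user", "content": "Hello, Mistral-7B!"}],
    temperature=0.5,
    stream=True,
    max_tokens=3000,
)

# Print streamed tokens as they arrive; st.write_stream does the equivalent in the app.
for chunk in stream:
    print(chunk.choices[0].delta.content or "", end="")
print()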