Update app.py
app.py CHANGED
@@ -19,7 +19,6 @@ def reset_conversation():
     Resets Conversation
     '''
     st.session_state.messages = []
-    return None
 
 # Set the temperature value directly in the code
 temperature = 0.5
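The hunk header above names the function this change touches, and the next hunk's header shows the button that calls it. Reconstructed from the context lines (so wording and spacing are approximate, not verbatim from the file), the reset path after this commit is simply:

def reset_conversation():
    '''
    Resets Conversation
    '''
    st.session_state.messages = []

st.button('Reset Chat', on_click=reset_conversation)

Dropping the explicit return None changes nothing functionally; Streamlit does not use the return value of an on_click callback either way.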
@@ -31,42 +30,44 @@ st.button('Reset Chat', on_click=reset_conversation)
 if "messages" not in st.session_state:
     st.session_state.messages = []
 
+# Display chat messages from history on app rerun
+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        st.markdown(message["content"])
+
 # Accept user input
 if prompt := st.chat_input("Type your message here..."):
 
     # Display user message in chat message container
     with st.chat_message("user"):
         st.markdown(prompt)
+
     # Add user message to chat history
     st.session_state.messages.append({"role": "user", "content": prompt})
 
-    #
-
-
-
-    messages_for_api = [
-        {"role": "system", "content": "As a helpful and friendly assistant, provide concise and accurate responses to the user's queries."}
-    ] + st.session_state.messages # Add system message and user messages to the list
-
-    response = client.chat.completions.create(
-        model=model_link,
-        messages=messages_for_api,
-        temperature=temperature,
-        max_tokens=150 # Adjust the max tokens according to your needs
-    )
-
-    # Get the response content
-    assistant_response = response["choices"][0]["message"]["content"]
-    st.markdown(assistant_response)
+    # Interact with the model
+    try:
+        # Send the user and system messages to the API
+        messages_for_api = [{"role": "system", "content": "You are a helpful assistant."}] + st.session_state.messages
 
-
-
-
+        response = client.chat.completions.create(
+            model=model_link,
+            messages=messages_for_api,
+            temperature=temperature,
+            max_tokens=150 # Adjust the max tokens according to your needs
+        )
 
-
-    st.session_state.messages.append({"role": "assistant", "content": assistant_response})
+        assistant_response = response["choices"][0]["message"]["content"]
 
-    # Display
-
-
-
+        # Display assistant response in chat message container
+        with st.chat_message("assistant"):
+            st.markdown(assistant_response)
+
+        # Append the assistant's response to the chat history
+        st.session_state.messages.append({"role": "assistant", "content": assistant_response})
+
+    except Exception as e:
+        # Display error message to user
+        with st.chat_message("assistant"):
+            st.markdown("Sorry, I couldn't process your request. Please try again later.")
+        st.write(e)
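For context, the added code assumes client and model_link are defined earlier in app.py; those lines are outside this diff. A minimal sketch of one common setup for a Space like this is shown below, assuming an OpenAI-compatible client pointed at the Hugging Face Inference API; the model id, base URL, and secret name are placeholders, not taken from this commit.

from openai import OpenAI
import streamlit as st

# Assumed setup -- not part of this commit. Adjust the model id, base URL,
# and secret name to match the Space's actual configuration.
model_link = "mistralai/Mistral-7B-Instruct-v0.2"
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=st.secrets["HF_TOKEN"],  # hypothetical secret name for the API token
)

One caveat: if client is the openai>=1.0 client, chat.completions.create returns a ChatCompletion object rather than a dict, so the added line assistant_response = response["choices"][0]["message"]["content"] would raise a TypeError; attribute access (response.choices[0].message.content) is the form that works with that client. If the Space's client returns plain dicts, the subscripted form in the diff is fine.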