Update app.py
app.py CHANGED

@@ -7,6 +7,7 @@ from streamlit_feedback import streamlit_feedback
 
 from rag_chain.chain import get_rag_chain
 
+# Langsmith client for the feedback system
 client = Client()
 
 # Streamlit page configuration
@@ -113,19 +114,19 @@ if prompt:
 
     try:
         partial_message = ""
-
-
-
-
-
-
+        # Collect runs for feedback using Langsmith
+        with st.spinner(" "), collect_runs() as cb:
+            for chunk in chain.stream({"message": prompt}):
+                partial_message += chunk
+                message_placeholder.markdown(partial_message + "|")
+            st.session_state.run_id = cb.traced_runs[0].id
+        message_placeholder.markdown(partial_message)
     except openai.BadRequestError:
         st.warning(openai_api_error_message, icon="π")
         st.stop()
     except Exception as e:
         st.warning(base_error_message, icon="π")
         st.stop()
-    message_placeholder.markdown(partial_message)
 
     # Add the full response to the history
     st.session_state["history"].append((prompt, partial_message))
@@ -133,7 +134,7 @@ if prompt:
     # Add AI message to memory after the response is generated
     memory.chat_memory.add_ai_message(AIMessage(content=partial_message))
 
-    #
+    # Add the full response to the message history
     st.session_state["messages"].append(ChatMessage(
         role="assistant", content=partial_message))
 
@@ -158,9 +159,9 @@ st.sidebar.markdown(
     Thank you! Let's get started. π
 
     **Note**:\n\n
-
-    This AI assistant is designed to provide guidance and general information about the services offered by Tall Tree Health.
-    It is not intended for seeking medical advice and should not be used as such.
+
+    This AI assistant is designed to provide guidance and general information about the services offered by Tall Tree Health.
+    It is not intended for seeking medical advice and should not be used as such.
     The information provided by this generative AI technology cannot replace the advice of qualified healthcare professionals.
     """
 )
@@ -172,6 +173,7 @@ if st.session_state.get("run_id"):
     run_id = st.session_state.run_id
     feedback = streamlit_feedback(
         feedback_type=feedback_option,
+        optional_text_label="[Optional] Please provide an explanation",
         key=f"feedback_{run_id}",
     )
     score_mappings = {
@@ -196,6 +198,7 @@ if st.session_state.get("run_id"):
         run_id,
         feedback_type_str,
         score=score,
+        comment=feedback.get("text"),
     )
     st.session_state.feedback = {
         "feedback_id": str(feedback_record.id),
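Read together, the hunks implement one pattern: the answer is streamed inside a `collect_runs()` context so the LangSmith run id of the traced call is stored in `st.session_state`, and the feedback widget's optional free-text answer is later forwarded as the `comment` of the LangSmith feedback record. A minimal sketch of that pattern, assuming an existing `chain` runnable, a `message_placeholder` from `st.empty()`, and a thumbs-style widget; the helper names and the simplified score mapping are illustrative, not the app's exact code:

```python
import streamlit as st
from langchain_core.tracers.context import collect_runs  # collects traced runs for LangSmith
from langsmith import Client
from streamlit_feedback import streamlit_feedback

client = Client()  # LangSmith client for the feedback system


def stream_and_trace(chain, prompt: str, message_placeholder) -> str:
    """Stream the chain's answer while capturing the LangSmith run id."""
    partial_message = ""
    with st.spinner(" "), collect_runs() as cb:
        for chunk in chain.stream({"message": prompt}):
            partial_message += chunk
            message_placeholder.markdown(partial_message + "|")  # typing cursor
        # Keep the id of the traced run so feedback can be attached to it later.
        st.session_state.run_id = cb.traced_runs[0].id
    message_placeholder.markdown(partial_message)
    return partial_message


def collect_feedback(run_id) -> None:
    """Render the feedback widget and log score plus optional comment to LangSmith."""
    feedback = streamlit_feedback(
        feedback_type="thumbs",  # assumed; the app passes `feedback_option`
        optional_text_label="[Optional] Please provide an explanation",
        key=f"feedback_{run_id}",
    )
    if feedback:  # the widget returns None until the user submits
        score = 1 if feedback["score"] == "👍" else 0  # simplified score mapping
        client.create_feedback(
            run_id,
            "thumbs",  # feedback key; the app derives this from its score mappings
            score=score,
            comment=feedback.get("text"),  # the optional free-text explanation
        )
```

Capturing the run id inside the same context that streams the response ties each thumbs/comment submission to the exact traced run, which is what lets `create_feedback` land on the right LangSmith trace.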