L137 and L141 updated
Browse files
app.py
CHANGED
@@ -134,11 +134,13 @@ if prompt := st.chat_input(initial_input):
|
|
134 |
try:
|
135 |
llm_response = llama2_7b_ysa(question)
|
136 |
except:
|
137 |
-
|
|
|
138 |
else:
|
139 |
-
|
140 |
"Sorry for the delay. We are in the progress of fine-tune the model. ⚙️"
|
141 |
)
|
|
|
142 |
|
143 |
finetuned_llm_guess = ["from_llm", question, llm_response, 0]
|
144 |
final_ref.loc[-1] = finetuned_llm_guess
|
|
|
134 |
try:
|
135 |
llm_response = llama2_7b_ysa(question)
|
136 |
except:
|
137 |
+
st.warning("Sorry, the inference endpoint is temporarily down. 😔")
|
138 |
+
llm_response = "NA."
|
139 |
else:
|
140 |
+
st.warning(
|
141 |
"Sorry for the delay. We are in the progress of fine-tune the model. ⚙️"
|
142 |
)
|
143 |
+
llm_response = "NA"
|
144 |
|
145 |
finetuned_llm_guess = ["from_llm", question, llm_response, 0]
|
146 |
final_ref.loc[-1] = finetuned_llm_guess
|