eagle0504 committed
Commit b220eb0 • 1 Parent(s): 7213597

L137 and L141 updated

Files changed (1)
app.py +4 -2
app.py CHANGED
@@ -134,11 +134,13 @@ if prompt := st.chat_input(initial_input):
     try:
         llm_response = llama2_7b_ysa(question)
     except:
-        llm_response = "Sorry, the inference endpoint is temporarily down. 😔"
+        st.warning("Sorry, the inference endpoint is temporarily down. 😔")
+        llm_response = "NA."
     else:
-        llm_response = (
+        st.warning(
             "Sorry for the delay. We are in the progress of fine-tune the model. ⚙️"
         )
+        llm_response = "NA"
 
     finetuned_llm_guess = ["from_llm", question, llm_response, 0]
     final_ref.loc[-1] = finetuned_llm_guess
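
Taken together, the change routes both apology messages to the Streamlit UI via st.warning and records a short "NA" placeholder as the model's answer, instead of writing the apology text itself into final_ref. Below is a minimal, self-contained sketch of the resulting flow; it assumes app.py imports Streamlit as st and that final_ref is a pandas DataFrame (suggested by the .loc[-1] assignment), stubs out llama2_7b_ysa purely for illustration (the real call lives elsewhere in app.py), and uses hypothetical column names and an example question not taken from the diff:

# Sketch only: reconstructs the post-commit behaviour under the assumptions above.
import pandas as pd
import streamlit as st

def llama2_7b_ysa(question: str) -> str:
    # Stub standing in for the app's fine-tuned Llama-2-7B inference call;
    # raising here simulates the endpoint being down (exercises the except branch).
    raise RuntimeError("inference endpoint unavailable")

# Hypothetical column names; the diff only shows a 4-element row being appended.
final_ref = pd.DataFrame(columns=["source", "question", "answer", "score"])

question = "example user question"  # illustrative value, not from the diff
try:
    llm_response = llama2_7b_ysa(question)
except Exception:
    # Endpoint down: warn in the UI and store a placeholder answer.
    st.warning("Sorry, the inference endpoint is temporarily down. 😔")
    llm_response = "NA."
else:
    # Call succeeded, but the app still warns about ongoing fine-tuning and
    # stores a placeholder rather than the raw response, matching the commit.
    st.warning("Sorry for the delay. We are in the progress of fine-tune the model. ⚙️")
    llm_response = "NA"

finetuned_llm_guess = ["from_llm", question, llm_response, 0]
final_ref.loc[-1] = finetuned_llm_guess  # append the row, as in the original code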