eagle0504 committed on
Commit
b26e319
β€’
1 Parent(s): 323405b

button added

Browse files
Files changed (1) hide show
  1. app.py +91 -89
app.py CHANGED
@@ -59,6 +59,7 @@ special_threshold = st.sidebar.number_input(
59
  st.sidebar.success(
60
  "The 'distances' score indicates the proximity of your question to our database questions (lower is better). The 'ai_judge' ranks the similarity between user's question and database answers independently (higher is better)."
61
  )
 
62
  clear_button = st.sidebar.button("Clear Conversation", key="clear")
63
  if clear_button:
64
  st.session_state.messages = []
@@ -114,99 +115,100 @@ with st.spinner("Loading, please be patient with us ... πŸ™"):
114
 
115
 
116
  # React to user input
117
- if prompt := st.chat_input(initial_input):
118
- with st.spinner("Loading, please be patient with us ... πŸ™"):
119
- # Display user message in chat message container
120
- st.chat_message("user").markdown(prompt)
121
- # Add user message to chat history
122
- st.session_state.messages.append({"role": "user", "content": prompt})
123
-
124
- question = prompt
125
- begin_t = time.time()
126
- results = collection.query(query_texts=question, n_results=5)
127
- end_t = time.time()
128
- st.success(f"Query answser. | Time: {end_t - begin_t} sec")
129
- idx = results["ids"][0]
130
- idx = [int(i) for i in idx]
131
- ref = pd.DataFrame(
132
- {
133
- "idx": idx,
134
- "questions": [dataset["train"]["questions"][i] for i in idx],
135
- "answers": [dataset["train"]["answers"][i] for i in idx],
136
- "distances": results["distances"][0],
137
- }
138
- )
139
- # special_threshold = st.sidebar.slider('How old are you?', 0, 0.6, 0.1) # 0.3
140
- filtered_ref = ref[ref["distances"] < special_threshold]
141
- if filtered_ref.shape[0] > 0:
142
- st.success("There are highly relevant information in our database.")
143
- ref_from_db_search = filtered_ref["answers"].str.cat(sep=" ")
144
- final_ref = filtered_ref
145
- else:
146
- st.warning(
147
- "The database may not have relevant information to help your question so please be aware of hallucinations."
148
  )
149
- ref_from_db_search = ref["answers"].str.cat(sep=" ")
150
- final_ref = ref
151
-
152
- if option == "YSA":
153
- try:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
154
  begin_t = time.time()
155
- llm_response = llama2_7b_ysa(question)
 
 
 
 
 
 
 
 
 
 
 
 
156
  end_t = time.time()
157
- st.success(f"Running LLM. | Time: {end_t - begin_t} sec")
158
- except:
159
- st.warning("Sorry, the inference endpoint is temporarily down. πŸ˜”")
160
- llm_response = "NA."
161
- else:
162
- st.warning(
163
- "Apologies! We are in the progress of fine-tune the model, so it's currently unavailable. βš™οΈ"
164
- )
165
- llm_response = "NA"
166
 
167
- finetuned_llm_guess = ["from_llm", question, llm_response, 0]
168
- final_ref.loc[-1] = finetuned_llm_guess
169
- final_ref = final_ref.reset_index()
170
 
171
- # add ai judge as additional rating
172
- if run_ai_judge == "Yes":
173
- independent_ai_judge_score = []
174
- begin_t = time.time()
175
- for i in range(final_ref.shape[0]):
176
- this_content = final_ref["answers"][i]
177
- if len(this_content) > 3:
178
- arr1 = openai_text_embedding(question)
179
- arr2 = openai_text_embedding(this_content)
180
- # this_score = calculate_sts_openai_score(question, this_content)
181
- this_score = quantized_influence(arr1, arr2)
182
- else:
183
- this_score = 0
184
- independent_ai_judge_score.append(this_score)
185
-
186
- final_ref["ai_judge"] = independent_ai_judge_score
187
 
 
 
 
 
 
188
  end_t = time.time()
189
- st.success(f"Using AI Judge. | Time: {end_t - begin_t} sec")
190
-
191
- engineered_prompt = f"""
192
- Based on the context: {ref_from_db_search}
193
-
194
- answer the user question: {question}
195
-
196
- Answer the question directly (don't say "based on the context, ...")
197
- """
198
-
199
- begin_t = time.time()
200
- answer = call_chatgpt(engineered_prompt)
201
- end_t = time.time()
202
- st.success(f"Final API Call. | Time: {end_t - begin_t} sec")
203
- response = answer
204
-
205
- # Display assistant response in chat message container
206
- with st.chat_message("assistant"):
207
- with st.spinner("Wait for it..."):
208
- st.markdown(response)
209
- with st.expander("See reference:"):
210
- st.table(final_ref)
211
- # Add assistant response to chat history
212
- st.session_state.messages.append({"role": "assistant", "content": response})
 
59
  st.sidebar.success(
60
  "The 'distances' score indicates the proximity of your question to our database questions (lower is better). The 'ai_judge' ranks the similarity between user's question and database answers independently (higher is better)."
61
  )
62
+ submit_button = st.sidebar.button("Submit", type="primary")
63
  clear_button = st.sidebar.button("Clear Conversation", key="clear")
64
  if clear_button:
65
  st.session_state.messages = []
 
115
 
116
 
117
  # React to user input
118
+ if submit_button:
119
+ if prompt := st.chat_input(initial_input):
120
+ with st.spinner("Loading, please be patient with us ... πŸ™"):
121
+ # Display user message in chat message container
122
+ st.chat_message("user").markdown(prompt)
123
+ # Add user message to chat history
124
+ st.session_state.messages.append({"role": "user", "content": prompt})
125
+
126
+ question = prompt
127
+ begin_t = time.time()
128
+ results = collection.query(query_texts=question, n_results=5)
129
+ end_t = time.time()
130
+ st.success(f"Query answser. | Time: {end_t - begin_t} sec")
131
+ idx = results["ids"][0]
132
+ idx = [int(i) for i in idx]
133
+ ref = pd.DataFrame(
134
+ {
135
+ "idx": idx,
136
+ "questions": [dataset["train"]["questions"][i] for i in idx],
137
+ "answers": [dataset["train"]["answers"][i] for i in idx],
138
+ "distances": results["distances"][0],
139
+ }
 
 
 
 
 
 
 
 
 
140
  )
141
+ # special_threshold = st.sidebar.slider('How old are you?', 0, 0.6, 0.1) # 0.3
142
+ filtered_ref = ref[ref["distances"] < special_threshold]
143
+ if filtered_ref.shape[0] > 0:
144
+ st.success("There are highly relevant information in our database.")
145
+ ref_from_db_search = filtered_ref["answers"].str.cat(sep=" ")
146
+ final_ref = filtered_ref
147
+ else:
148
+ st.warning(
149
+ "The database may not have relevant information to help your question so please be aware of hallucinations."
150
+ )
151
+ ref_from_db_search = ref["answers"].str.cat(sep=" ")
152
+ final_ref = ref
153
+
154
+ if option == "YSA":
155
+ try:
156
+ begin_t = time.time()
157
+ llm_response = llama2_7b_ysa(question)
158
+ end_t = time.time()
159
+ st.success(f"Running LLM. | Time: {end_t - begin_t} sec")
160
+ except:
161
+ st.warning("Sorry, the inference endpoint is temporarily down. πŸ˜”")
162
+ llm_response = "NA."
163
+ else:
164
+ st.warning(
165
+ "Apologies! We are in the progress of fine-tune the model, so it's currently unavailable. βš™οΈ"
166
+ )
167
+ llm_response = "NA"
168
+
169
+ finetuned_llm_guess = ["from_llm", question, llm_response, 0]
170
+ final_ref.loc[-1] = finetuned_llm_guess
171
+ final_ref = final_ref.reset_index()
172
+
173
+ # add ai judge as additional rating
174
+ if run_ai_judge == "Yes":
175
+ independent_ai_judge_score = []
176
  begin_t = time.time()
177
+ for i in range(final_ref.shape[0]):
178
+ this_content = final_ref["answers"][i]
179
+ if len(this_content) > 3:
180
+ arr1 = openai_text_embedding(question)
181
+ arr2 = openai_text_embedding(this_content)
182
+ # this_score = calculate_sts_openai_score(question, this_content)
183
+ this_score = quantized_influence(arr1, arr2)
184
+ else:
185
+ this_score = 0
186
+ independent_ai_judge_score.append(this_score)
187
+
188
+ final_ref["ai_judge"] = independent_ai_judge_score
189
+
190
  end_t = time.time()
191
+ st.success(f"Using AI Judge. | Time: {end_t - begin_t} sec")
 
 
 
 
 
 
 
 
192
 
193
+ engineered_prompt = f"""
194
+ Based on the context: {ref_from_db_search}
 
195
 
196
+ answer the user question: {question}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
197
 
198
+ Answer the question directly (don't say "based on the context, ...")
199
+ """
200
+
201
+ begin_t = time.time()
202
+ answer = call_chatgpt(engineered_prompt)
203
  end_t = time.time()
204
+ st.success(f"Final API Call. | Time: {end_t - begin_t} sec")
205
+ response = answer
206
+
207
+ # Display assistant response in chat message container
208
+ with st.chat_message("assistant"):
209
+ with st.spinner("Wait for it..."):
210
+ st.markdown(response)
211
+ with st.expander("See reference:"):
212
+ st.table(final_ref)
213
+ # Add assistant response to chat history
214
+ st.session_state.messages.append({"role": "assistant", "content": response})