hidevscommunity committed on
Commit
e847b04
1 Parent(s): fa8ab4d

Update app.py

Files changed (1)
  1. app.py +69 -1
app.py CHANGED
@@ -153,4 +153,72 @@ else:
 
     # Add a footer
     st.markdown("---")
-    st.markdown("By AI Planet")
+    st.markdown("By AI Planet")
+
+
+
+ # TruLens evaluation: log groundedness and relevance scores for each query.
+ # Note: pandas (pd) and os are assumed to be imported earlier in app.py.
+ from trulens_eval import Tru
+ from trulens_eval.tru_custom_app import instrument  # available for decorating custom app methods; not used directly below
+ from trulens_eval import Feedback, Select
+ from trulens_eval.feedback.provider.openai import OpenAI
+ import numpy as np
+ from trulens_eval import TruCustomApp
+
+ tru = Tru()
+ tru.reset_database()
+
+ def append_to_csv(user_query, provider, retriever, csv_path='relevance_scores.csv'):
+     # provider = OpenAI(model_engine="gpt-4o")  # alternative: construct the provider here instead of passing it in
+
+     # Groundedness of the response against the retrieved context.
+     # (The selectors below assume the instrumented app exposes a `retrieve` method.)
+     f_groundedness = (
+         Feedback(provider.groundedness_measure_with_cot_reasons, name="Groundedness")
+         .on(Select.RecordCalls.retrieve.rets.collect())
+         .on_output()
+     )
+
+     # Question/answer relevance between the overall question and the answer.
+     f_answer_relevance = (
+         Feedback(provider.relevance_with_cot_reasons, name="Answer Relevance")
+         .on_input()
+         .on_output()
+     )
+
+     # Context relevance between the question and each retrieved context chunk.
+     f_context_relevance = (
+         Feedback(provider.context_relevance_with_cot_reasons, name="Context Relevance")
+         .on_input()
+         .on(Select.RecordCalls.retrieve.rets[:])
+         .aggregate(np.mean)  # choose a different aggregation method if you wish
+     )
+
+     # Wrap the retriever as a TruLens custom app with the feedback functions attached.
+     tru_rag = TruCustomApp(retriever,
+                            app_id='RAG v1',
+                            feedbacks=[f_groundedness, f_answer_relevance, f_context_relevance])
+
+     # Record a single retrieval call and wait for the feedback results.
+     with tru_rag as recording:
+         llm_response = retriever.get_relevant_documents(user_query)  # note: this holds retrieved documents, not a generated answer
+
+     rec = recording.get()
+     feedback_results = rec.wait_for_feedback_results()
+
+     # Collect feedback scores keyed by feedback name.
+     feedback_scores = {feedback.name: feedback_result.result for feedback, feedback_result in feedback_results.items()}
+
+     data = {
+         "User Query": [user_query],
+         "LLM Response": [llm_response],
+         "Answer Relevance": [feedback_scores.get("Answer Relevance")],
+         "Context Relevance": [feedback_scores.get("Context Relevance")],
+         "Groundedness": [feedback_scores.get("Groundedness")]
+     }
+     df = pd.DataFrame(data)
+
+     # Append to the CSV, writing the header only when the file does not exist yet.
+     if not os.path.isfile(csv_path):
+         df.to_csv(csv_path, index=False)
+     else:
+         df.to_csv(csv_path, mode='a', header=False, index=False)
+
+
+ append_to_csv(prompt, OpenAI(model_engine="gpt-4o"), retriever)
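
For reference, the scores logged to relevance_scores.csv can also be inspected through TruLens itself. A minimal sketch, assuming the same local `Tru()` database and the `'RAG v1'` app_id used above:

```python
# Minimal sketch: inspect the TruLens records created by append_to_csv above.
from trulens_eval import Tru

tru = Tru()  # connects to the same local TruLens database used by app.py

# Aggregate Groundedness / Answer Relevance / Context Relevance per app.
print(tru.get_leaderboard(app_ids=["RAG v1"]))

# Or browse individual records and their feedback in the local TruLens dashboard.
tru.run_dashboard()
```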