Commit 2393995 by rchrdgwr
Parent(s): 39f7a5d

Fridays updates

Dockerfile CHANGED
@@ -8,4 +8,4 @@ COPY --chown=user . $HOME/app
 COPY ./requirements.txt ~/app/requirements.txt
 RUN pip install -r requirements.txt
 COPY . .
-CMD ["chainlit", "run", "app_main.py", "--port", "7860"]
+CMD ["chainlit", "run", "app.py", "--port", "7860"]
__pycache__/app.cpython-311.pyc ADDED
Binary file (6.96 kB).
 
__pycache__/classes.cpython-311.pyc CHANGED
Binary files a/__pycache__/classes.cpython-311.pyc and b/__pycache__/classes.cpython-311.pyc differ
 
__pycache__/utils_actions.cpython-311.pyc CHANGED
Binary files a/__pycache__/utils_actions.cpython-311.pyc and b/__pycache__/utils_actions.cpython-311.pyc differ
 
__pycache__/utils_callbacks.cpython-311.pyc CHANGED
Binary files a/__pycache__/utils_callbacks.cpython-311.pyc and b/__pycache__/utils_callbacks.cpython-311.pyc differ
 
__pycache__/utils_chain_parameters.cpython-311.pyc CHANGED
Binary files a/__pycache__/utils_chain_parameters.cpython-311.pyc and b/__pycache__/utils_chain_parameters.cpython-311.pyc differ
 
__pycache__/utils_evaluate.cpython-311.pyc CHANGED
Binary files a/__pycache__/utils_evaluate.cpython-311.pyc and b/__pycache__/utils_evaluate.cpython-311.pyc differ
 
__pycache__/utils_evaluate_objections.cpython-311.pyc ADDED
Binary file (8.21 kB).
 
__pycache__/utils_objections.cpython-311.pyc CHANGED
Binary files a/__pycache__/utils_objections.cpython-311.pyc and b/__pycache__/utils_objections.cpython-311.pyc differ
 
__pycache__/utils_output.cpython-311.pyc CHANGED
Binary files a/__pycache__/utils_output.cpython-311.pyc and b/__pycache__/utils_output.cpython-311.pyc differ
 
__pycache__/utils_prep.cpython-311.pyc CHANGED
Binary files a/__pycache__/utils_prep.cpython-311.pyc and b/__pycache__/utils_prep.cpython-311.pyc differ
 
__pycache__/utils_prompt.cpython-311.pyc CHANGED
Binary files a/__pycache__/utils_prompt.cpython-311.pyc and b/__pycache__/utils_prompt.cpython-311.pyc differ
 
__pycache__/utils_simulation.cpython-311.pyc CHANGED
Binary files a/__pycache__/utils_simulation.cpython-311.pyc and b/__pycache__/utils_simulation.cpython-311.pyc differ
 
__pycache__/utils_voice.cpython-311.pyc CHANGED
Binary files a/__pycache__/utils_voice.cpython-311.pyc and b/__pycache__/utils_voice.cpython-311.pyc differ
 
app_main.py → app.py RENAMED
@@ -31,7 +31,22 @@ whisper_model = whisper.load_model("base")
 # Action callbacks
 #############################################
 
-@cl.action_callback("HSBC: Lending - Loan Origination System (Qualification)")
+@cl.action_callback("Deal Analysis")
+async def on_action_anayze_deal(action):
+    session_state = cl.user_session.get("session_state", None)
+    await prep_opportunities(session_state)
+
+@cl.action_callback("Customer Research")
+async def on_action_anayze_deal(action):
+    session_state = cl.user_session.get("session_state", None)
+    await get_latest_news("HSBC")
+
+@cl.action_callback("Sales Simulation")
+async def on_action_sales_simulation(action):
+    session_state = cl.user_session.get("session_state", None)
+    await callback_run_scenario(action)
+
+@cl.action_callback("HSBC: Lending - Loan Origination System (Proposal)")
 async def on_action_anayze_opportunity(action):
     await prep_opportunity_analysis()
 
@@ -57,6 +72,7 @@ async def on_action_display_queries_responses(action):
     await callback_display_queries_responses()
 
 
+
 #############################################
 ### On Chat Start (Session Start) Section ###
 #############################################
@@ -74,9 +90,7 @@ async def on_chat_start():
 
     await prep_start(session_state)
 
-    await prep_opportunities(session_state)
-
-
+    # await prep_opportunities(session_state)
 
     # await prep_opportunity_analysis(session_state)
 
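A note on the new callbacks: two of them reuse the function name on_action_anayze_deal. Chainlit registers action handlers at decoration time, keyed by the action-name string, so both buttons should still dispatch even though the second definition rebinds the module-level name. A minimal sketch of that behavior (toy handlers, not from this commit; assumes get_latest_news is imported in app.py for the real "Customer Research" callback):

    import chainlit as cl

    @cl.action_callback("Deal Analysis")
    async def handler(action):
        await cl.Message(content="deal analysis").send()

    @cl.action_callback("Customer Research")
    async def handler(action):  # same Python name, different action key
        # Both callbacks still fire: registration happened at decoration
        # time, keyed by the action-name string, not the function name.
        await cl.Message(content="customer research").send()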
app_am.py CHANGED
@@ -18,7 +18,7 @@ from langchain_core.runnables.passthrough import RunnablePassthrough
 from langchain.schema.runnable.config import RunnableConfig
 from langsmith.evaluation import LangChainStringEvaluator, evaluate
 from datetime import datetime
-from objection_eval import generate_response_to_objection
+from utils_evaluate_objections import generate_response_to_objection
 import pandas as pd
 import uuid
 import chainlit as cl
classes.py CHANGED
@@ -4,8 +4,9 @@ class SessionState:
     do_opportunity_analysis = False
     do_customer_research = False
     do_objections = False
-    add_objections_to_analysis = False
+    add_objections_to_analysis = True
     ask_objections = True
+    use_objection_cache = True
     do_ragas_evaluation = False
     customer_research_report_md = "HSBC Quarterly Report 2024-10-16.md"
     customer_research_report_pdf = "HSBC Quarterly Report 2024-10-16.pdf"
@@ -20,7 +21,7 @@ class SessionState:
     duration_minutes = None
     attitude = "Happy"
     mood_score = 5
-    num_questions = 4
+    num_questions = 2
     current_question_index = 0
     previous_answer = None
     question = ""
@@ -40,6 +41,7 @@ class SessionState:
         self.do_objections = False
         self.add_objections_to_analysis = False
         self.ask_objections = True
+        self.use_objection_cache = True
         self.do_ragas_evaluation = False
         self.customer_research_report_md = "HSBC Quarterly Report 2024-10-16.md"
         self.customer_research_report_pdf = "HSBC Quarterly Report 2024-10-16.pdf"
@@ -54,7 +56,7 @@ class SessionState:
         self.duration_minutes = None
         self.attitude = "Happy"
         self.mood_score = 5
-        self.num_questions = 4
+        self.num_questions = 2
        self.current_question_index = 0
        self.previous_answer = None
        self.question = ""
data/Opportunity_Information.csv CHANGED
@@ -1,3 +1,3 @@
 Opportunity ID,Customer Name,Opportunity Name,Opportunity Stage,Opportunity Description,Opportunity Value,Close Date,Customer Contact,Customer Contact Role,Activity,Next Steps
-XFR0001,HSBC,Lending - Loan Origination System,Qualification,Developing analytic capabilities for the loan origination system,"$250,000",12/30/2024,John Smith,"VP, Information Technology",Had first meeting with John on 9/16. Identified that they are unhappy with the analytic capability of their current lending solution. Next step is to meet with them to understand their business drivers and pain points better. Will also use the next meeting to explain our company's value proposition.,Meet with John on 10/12 to qualify the opportunity better and share our value proposition
+XFR0001,HSBC,Lending - Loan Origination System,Proposal,Developing analytic capabilities for the loan origination system,"$250,000",11/30/2024,John Smith,"VP, Information Technology",We have had several meetings with HSBC's lending team regarding replacing the analytics engine for their lending solution. The current system is slow and inflexible. They have the renewal coming up with the existing vendor next year so there is urgency regarding the decision process. ,"Next Steps: Meet with John on 10/26 discuss next steps in the decision-making process, potentially moving towards a pilot program or final negotiations."
 XFR0002,Citi,Competitive Analysis SaaS,Negotiation,Develop SaaS for analysis of Citi's competitors in foreign markets,"$100,000",11/15/2024,Peter Branson,"CEO",Contract finally ironed out by both parties' lawyers. Have verbal agreement that this will be moving forwards.,Finalize contract with Peter and lawyers on October 25th
images/salesbuddy_logo.jpg ADDED
utils_actions.py CHANGED
@@ -1,10 +1,21 @@
 import chainlit as cl
 
+async def offer_initial_actions():
+    actions = [
+        cl.Action(name="Deal Analysis", value="deal-analysis", description="Deal Analysis"),
+        cl.Action(name="Customer Research", value="customer-research", description="Customer Research"),
+        cl.Action(name="Sales Simulation", value="sales-simulation", description="Sales Simulation"),
+    ]
+    await cl.Message(content=" ", actions=actions).send()
+    await cl.Message(content="\n\n").send()
+
 async def offer_actions():
+    await cl.Message(content="\n\n").send()
     actions = [
         cl.Action(name="Get Latest News on this Customer", value="HSBC", description="Get Latest News"),
         cl.Action(name="Enter Meeting Simulation", value="enter-meeting-simulation", description="Enter Meeting Simulation"),
         cl.Action(name="Review Another Opportunity", value="review-another-opportunity", description="Review Another Opportunity"),
     ]
     await cl.Message(content="Select an action", actions=actions).send()
+    await cl.Message(content="\n\n").send()
 
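In Chainlit, an Action's name doubles as the button label and the dispatch key, so the three names in offer_initial_actions() must match the @cl.action_callback strings registered in app.py character for character. A minimal sketch of that pairing, in the same cl.Action(value=...) style this repo uses (handler name is hypothetical):

    import chainlit as cl

    @cl.action_callback("Deal Analysis")
    async def on_deal_analysis(action):
        await cl.Message(content="Running deal analysis...").send()

    async def offer_one_action():
        # name must equal the registered callback string exactly,
        # otherwise clicking the button dispatches nothing.
        actions = [cl.Action(name="Deal Analysis", value="deal-analysis",
                             description="Deal Analysis")]
        await cl.Message(content="Select an action", actions=actions).send()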
utils_callbacks.py CHANGED
@@ -66,15 +66,25 @@ async def callback_run_scenario(action):
     await cl.Message(content="Click to start simulation", actions=start_actions).send()
 
 
+
 async def callback_start_scenario():
     print("callback_start_scenario()")
     session_state = cl.user_session.get("session_state", None)
+    await cl.Message(content="3...").send()
+    await asyncio.sleep(1)
+    await cl.Message(content="2...").send()
+    await asyncio.sleep(1)
+    await cl.Message(content="1...").send()
+    await asyncio.sleep(1)
+    await cl.Message(content="**Simulation Starting**").send()
+    await cl.Message(content="\n\n").send()
     start_time = datetime.now()
     print("setting start time")
     session_state.start_time = start_time
     output = f"{session_state.customer.contact_name} joins the zoom call"
     print(output)
     await cl.Message(content=output).send()
+    await cl.Message(content="\n\n").send()
 
 async def callback_evaluate_performance():
     session_state = cl.user_session.get("session_state", None)
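The new countdown relies on asyncio.sleep, so utils_callbacks.py presumably imports asyncio at the top of the file (the import sits outside this hunk). The pattern itself, as a standalone sketch with a hypothetical helper name:

    import asyncio
    import chainlit as cl

    async def countdown(seconds=3):
        # asyncio.sleep yields to the event loop, so the UI stays
        # responsive while the "3... 2... 1..." messages tick down.
        for n in range(seconds, 0, -1):
            await cl.Message(content=f"{n}...").send()
            await asyncio.sleep(1)
        await cl.Message(content="**Simulation Starting**").send()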
utils_chain_parameters.py CHANGED
@@ -5,27 +5,32 @@ def prepare_chain_parameters(session_state, message, history):
     next_question = ""
     ground_truth = ""
     command = ""
+    all_questions_answers = ""
     print(f"Index: {session_state.current_question_index}")
     if session_state.current_question_index == 0:
         previous_question = ""
         rep_answer = ""
         ground_truth = ""
         next_question = session_state.questions[session_state.current_question_index]["question"]
-        command = "You should greet the rep"
+        all_questions_answers = ""
+        command = "You should greet the salesrep"
     elif session_state.current_question_index >= len(session_state.questions):
         next_question = ""
         previous_question = session_state.questions[session_state.current_question_index - 1]["question"]
         rep_answer = session_state.previous_answer
         ground_truth = session_state.questions[session_state.current_question_index - 1]["ground_truth"]
+        for response in session_state.responses:
+            all_questions_answers += f"Question: {response['question']}\nAnswer: {response['response']}\n\n"
         command = """Thank the customer, offer a comment on the answer and overall performance.
-        Conclude the conversation with a summary and give a farewell.
-        You can add additional comments as needed.
+        Conclude the conversation with a conclusion based on all of the questions and answers.
+        Give a polite farewell.
         """
     else:
         previous_question = session_state.questions[session_state.current_question_index - 1]["question"]
         rep_answer = session_state.previous_answer
         next_question = session_state.questions[session_state.current_question_index]["question"]
         ground_truth = session_state.questions[session_state.current_question_index]["ground_truth"]
+        all_questions_answers = ""
         command = "You should respond to the answer based on how well the rep answered the previous question."
     session_state.ground_truth = ground_truth
     session_state.question = previous_question
@@ -72,6 +77,7 @@ def prepare_chain_parameters(session_state, message, history):
         "rep_answer": rep_answer,
         "conversation_history": history,
         "command": command,
+        "all_questions_answers": all_questions_answers
     }
     return parm
 
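The final-turn branch now flattens the whole session into one prompt-ready transcript string. The same construction as a standalone sketch (helper name is hypothetical):

    def build_transcript(responses):
        # responses is a list of {"question": ..., "response": ...}
        # dicts, as stored on session_state.responses.
        out = ""
        for r in responses:
            out += f"Question: {r['question']}\nAnswer: {r['response']}\n\n"
        return out

    # build_transcript([{"question": "Q1", "response": "A1"}])
    # -> "Question: Q1\nAnswer: A1\n\n"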
utils_evaluate.py CHANGED
@@ -10,6 +10,25 @@ from ragas.metrics import (
 from rouge_score import rouge_scorer
 from sentence_transformers import SentenceTransformer, util
 
+from utils_evaluate_objections import generate_objection_scores
+
+
+def evaluate_objections(session):
+
+    for response in session.responses:
+        question = response.get("question", "")
+        answer = response.get("response", "")
+        print(f"Question: {question}")
+        print(f"Answer: {answer}")
+
+        q_and_a = {
+            "objection": question,
+            "answer": answer
+        }
+        score = generate_objection_scores(q_and_a)
+        response["evaluation_score"] = score
+
+
 def evaluate_answers(session):
     ragas_results = evaluate_with_ragas(session)
     session.ragas_results = ragas_results
@@ -27,8 +46,6 @@ def evaluate_answers(session):
     session.scores = scores
     return scores
 
-
-
 def evaluate_with_ragas(session):
     questions = []
     answers = []
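One thing to watch here: generate_objection_scores is declared async in utils_evaluate_objections.py, but evaluate_objections calls it without awaiting, so evaluation_score would hold a coroutine object rather than the 0/1 value. A hedged sketch of how the call could be driven to completion, assuming the signatures in this commit (helper name is hypothetical):

    import asyncio
    from utils_evaluate_objections import generate_objection_scores

    def score_objections_sync(session):
        for response in session.responses:
            q_and_a = {
                "objection": response.get("question", ""),
                "answer": response.get("response", ""),
            }
            # asyncio.run drives the coroutine from sync code; if the
            # caller already sits inside Chainlit's event loop, making
            # this function async and awaiting directly is cleaner.
            response["evaluation_score"] = asyncio.run(
                generate_objection_scores(q_and_a)
            )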
objection_eval.py → utils_evaluate_objections.py RENAMED
@@ -92,6 +92,24 @@ class SatisfyRate(MetricWithLLM, SingleTurnMetric):
         )
         return int(prompt_response.satisfy)
 
+async def generate_objection_scores(question_answer):
+    from langchain_openai import ChatOpenAI
+    from ragas.llms.base import LangchainLLMWrapper
+    import pandas as pd
+    # user_response= pd.read_csv(file_path)
+    openai_model = LangchainLLMWrapper(ChatOpenAI(model_name="gpt-4o", api_key=OPENAI_API_KEY))
+    scorer = SatisfyRate(llm=openai_model)
+
+    sample = SingleTurnSample(user_input=question_answer['objection'], response=question_answer['answer'])
+
+    # (user_response['objection'][num], user_response['response'][num])
+    satisfy_0_1 = await scorer.single_turn_ascore(sample)
+
+    print(question_answer['objection'], question_answer['answer'], satisfy_0_1)
+    # Implement your logic to generate a response based on the user's input
+    return satisfy_0_1  # f"Response to your objection: {user_response['objection'][num]}, {user_response['response'][num]}, {satisfy_0_1}"
+
+
 async def generate_response_to_objection(file_path, num):
     from langchain_openai import ChatOpenAI
     from ragas.llms.base import LangchainLLMWrapper
@@ -124,5 +142,4 @@ if __name__ == "__main__":
     file_path = sys.argv[1]
 
     # Run the main async function
-    asyncio.run(main(file_path))
-
+    asyncio.run(main(file_path))
utils_objections.py CHANGED
@@ -18,11 +18,20 @@ from qdrant_client.http.models import Distance, VectorParams
 
 
 async def create_objections(session_state):
-    customer_document_file = session_state.customer_research_report_pdf
-    customer_file_path = "reports/" + customer_document_file
-    bettertech_document_file = session_state.bettetech_value_proposition_pdf
-    bettertech_file_path = "data/" + bettertech_document_file
-    objections = await process_files(customer_file_path, bettertech_file_path)
+    if session_state.use_objection_cache:
+
+        objections = [
+            "1. Can you provide customer references in our industry?",
+            "2. Second question, what training options are available for our team?",
+            "3. Last but not least, your pricing seems high compared to some other solutions we've seen. Is there any flexibility??",
+        ]
+
+    else:
+        customer_document_file = session_state.customer_research_report_pdf
+        customer_file_path = "reports/" + customer_document_file
+        bettertech_document_file = session_state.bettetech_value_proposition_pdf
+        bettertech_file_path = "data/" + bettertech_document_file
+        objections = await process_files(customer_file_path, bettertech_file_path)
     return objections
 
 
utils_output.py CHANGED
@@ -3,8 +3,8 @@ import json
 import re
 from datetime import datetime
 
-from utils_evaluate import evaluate_answers
-
+from utils_evaluate import evaluate_answers, evaluate_objections
+from utils_prep import offer_initial_actions
 async def display_llm_responses(cl, session_state):
     output = f"**Responses**"
     await cl.Message(content=output).send()
@@ -61,7 +61,10 @@ async def display_evaluation_results(cl, session_state):
     out_text = "*Preparing evaluation results ...*"
     await cl.Message(content=out_text).send()
 
-    evaluate_answers(session_state)
+    if session_state.do_evaluation:
+        evaluate_answers(session_state)
+    elif session_state.add_objections_to_analysis:
+        evaluate_objections(session_state)
     await asyncio.sleep(1)
 
     output = f"**Session Summary**"
@@ -73,15 +76,15 @@ async def display_evaluation_results(cl, session_state):
     output = output + f"**Total Questions Answered:** {len(session_state.responses)} \n"
     await cl.Message(content=output).send()
 
-    results_df = session_state.ragas_results.to_pandas()
-    columns_to_average = ['answer_relevancy', 'answer_correctness']
-    averages = results_df[columns_to_average].mean()
+    if session_state.do_ragas_evaluation:
+        results_df = session_state.ragas_results.to_pandas()
+        columns_to_average = ['answer_relevancy', 'answer_correctness']
+        averages = results_df[columns_to_average].mean()
 
     await cl.Message(content="**Overall Summary (By SalesBuddy)**").send()
-    output = f"**Overall Score:** {session_state.responses[-1]['overall_score']} \n"
-    output = output + f"**Overall Evaluation:** {session_state.responses[-1]['overall_evaluation']} \n"
-    output = output + f"**Final Mood Score:** {session_state.responses[-1]['mood_score']} \n"
-    output = output + f"**Customer Next Steps:** {session_state.llm_next_steps} \n"
+    output = f"**SalesBuddy Score:** {session_state.responses[-1]['overall_score']} \n"
+    output = output + f"**SalesBuddy Evaluation:** {session_state.responses[-1]['overall_evaluation']} \n"
+    output = output + f"**SalesBuddy Final Mood Score:** {session_state.responses[-1]['mood_score']} \n"
     await cl.Message(content=output).send()
 
     if session_state.do_ragas_evaluation:
@@ -93,21 +96,23 @@ async def display_evaluation_results(cl, session_state):
     await cl.Message(content="**Individual Question Scores**").send()
 
     for index, resp in enumerate(session_state.responses):
-        scores = session_state.scores[index]
-        relevancy = results_df.iloc[index].get('answer_relevancy', 'N/A')
-        correctness = results_df.iloc[index].get('answer_correctness', 'N/A')
-        bleu_score = scores.get('bleu_score', 'N/A')
-        rouge1_score = scores.get('rouge_score', {}).get('rouge1', 'N/A')
-        rouge1_output = format_rogue_score(rouge1_score)
-        rougeL_score = scores.get('rouge_score', {}).get('rougeL', 'N/A')
-        rougeL_output = format_rogue_score(rougeL_score)
-        semantic_similarity_score = scores.get('semantic_similarity_score', 'N/A')
+
         output = f"""
         **Question:** {resp.get('question', 'N/A')}
-        **Answer:** {resp.get('response', 'N/A')}
-        **Ground Truth:** {resp.get('ground_truth', 'N/A')}
+        **Answer:** {resp.get('response', 'N/A')}
+        **SalesBuddy Evaluation:** {resp.get('response_evaluation', 'N/A')}
+        **Evaluation Score:** {resp.get('response_score', 'N/A')}
         """
         if session_state.do_ragas_evaluation:
+            scores = session_state.scores[index]
+            relevancy = scores.get('answer_relevancy', 'N/A')
+            correctness = scores.get('answer_correctness', 'N/A')
+            bleu_score = scores.get('bleu_score', 'N/A')
+            rouge1_score = scores.get('rouge_score', {}).get('rouge1', 'N/A')
+            rouge1_output = format_rogue_score(rouge1_score)
+            rougeL_score = scores.get('rouge_score', {}).get('rougeL', 'N/A')
+            rougeL_output = format_rogue_score(rougeL_score)
+            semantic_similarity_score = scores.get('semantic_similarity_score', 'N/A')
             numbers = f"""
             **Answer Relevancy:** {format_score(relevancy)}
             **Answer Correctness:** {format_score(correctness)}
@@ -120,3 +125,5 @@ async def display_evaluation_results(cl, session_state):
             await cl.Message(content=numbers).send()
         else:
             await cl.Message(content=output).send()
+
+    await offer_initial_actions()
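Note that results_df and averages are now only bound when do_ragas_evaluation is true, consistent with the later per-question block that reads ragas scores under the same flag. The guarded aggregation, as a standalone sketch (helper name is hypothetical; assumes the ragas results object exposes to_pandas() as used above):

    def ragas_averages(ragas_results):
        # Mirrors the guarded block: convert ragas results to a
        # DataFrame and average the two headline metrics.
        df = ragas_results.to_pandas()
        return df[["answer_relevancy", "answer_correctness"]].mean()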
utils_prep.py CHANGED
@@ -2,7 +2,7 @@ import asyncio
 import chainlit as cl
 from langchain_openai import ChatOpenAI
 
-from utils_actions import offer_actions
+from utils_actions import offer_actions,offer_initial_actions
 from utils_data import get_company_data, get_opportunities
 from utils_prompt import get_chat_prompt
 from utils_objections import create_objections
@@ -16,9 +16,18 @@ async def prep_start(session_state):
     simple_chain = chat_prompt | chat_model
     cl.user_session.set("chain", simple_chain)
 
-    welcome_message = f"**Welcome to {session_state.company.name} SalesBuddy**\n*Home of {session_state.company.product}*"
+    welcome_message = f"**Welcome to {session_state.company.name} SalesBuddy**\n*Your AI assistant for sales and sales management*"
     await cl.Message(content=welcome_message).send()
-    await cl.Message(content=session_state.company.product_summary).send()
+    # await cl.Message(content=session_state.company.product_summary).send()
+
+    image = cl.Image(path="images/salesbuddy_logo.jpg", name="salesbuddy_logo", display="inline")
+    await cl.Message(
+        content=" ",
+        elements=[image],
+    ).send()
+
+    await offer_initial_actions()
+
 
     opportunities = get_opportunities()
     cl.user_session.set("opportunities", opportunities)
@@ -93,6 +102,7 @@ async def prep_opportunity_analysis():
     for output_message in output_messages:
         await cl.Message(content=output_message).send()
     await cl.Message(content="").send()
+    await cl.Message(content="\n\n").send()
 
     await offer_actions()
 
@@ -104,7 +114,7 @@ async def prep_research(session_state):
 
 
 def get_opportunity_analysis():
-    output_1 = "**Summary:** The HSBC opportunity involves replacing the existing analytics engine for their loan origination system, valued at $250,000. The current system is slow and lacks flexibility, creating urgency due to an impending renewal with the existing vendor. Multiple meetings have been conducted, culminating in a proposal review. The decision process is progressing, with a meeting scheduled to discuss the next steps on October 18, 2024."
+    output_1 = "**Summary:** The HSBC opportunity involves replacing the existing analytics engine for their loan origination system, valued at $250,000. The current system is slow and lacks flexibility, creating urgency due to an impending renewal with the existing vendor. Multiple meetings have been conducted, culminating in a proposal review. The decision process is progressing, with a meeting scheduled on October 26 with John Smith to discuss the next steps. Potential for pilot program or final negotiations."
     output_2 = "**Score: 75**"
     output_3 = "**MEDDIC Evaluation:**"
     output_4 = "**Metrics: 70** - The proposal discussed expected performance improvements and ROI, but specific quantitative metrics driving the decision were not detailed."
utils_prompt.py CHANGED
@@ -30,6 +30,9 @@ def get_user_template_openai_short():
 
     Rep Answer:
     {rep_answer}
+
+    All Questions and Answers:
+    {all_questions_answers}
 
     """
     return user_template
@@ -81,6 +84,9 @@ def get_user_template_openai_long():
     Rep Answer:
     {rep_answer}
 
+    All Questions and Answers:
+    {all_questions_answers}
+
     Conversation History:
     {conversation_history}
     """
@@ -110,6 +116,8 @@ def get_system_template_openai_short():
     You can make conversation but you must follow the command.
     If a previous question and answer are provided, you must evaluate the rep's answer.
     You will perform evaluation based on how well and thoroughly the rep answered the previous question.
+    If asked to provide a conclusion, you must consider all of the rep's answers to your questions.
+    These are provided in the 'All questions and answers:' section.
     You will ALWAYS provide your response in valid JSON format
     Remember all string values must be enclosed in double quotes.
     You will include with the following fields in JSON format:
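Because the user templates now contain an {all_questions_answers} placeholder, every caller that formats them must supply that key; prepare_chain_parameters does so via the new "all_questions_answers" entry. A toy illustration of the contract (not this repo's actual templates):

    from langchain_core.prompts import ChatPromptTemplate

    prompt = ChatPromptTemplate.from_template(
        "Rep Answer:\n{rep_answer}\n\n"
        "All Questions and Answers:\n{all_questions_answers}"
    )
    # Omitting all_questions_answers here would raise a KeyError.
    text = prompt.format(rep_answer="...",
                         all_questions_answers="Question: Q1\nAnswer: A1\n\n")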
utils_simulation.py CHANGED
@@ -51,7 +51,7 @@ async def do_simulation(client, session_state, message):
     if session_state.do_voice:
         await reply_with_voice(cl, client, message_to_rep)
     else:
-        await cl.Message(message_to_rep).send()
+        await cl.Message(content=message_to_rep, author="John Smith").send()
     # await cl.Message(this_response).send()
     history.append({"role": "assistant", "content": response_content})
     cl.user_session.set("history", history)
@@ -74,8 +74,10 @@ async def do_simulation(client, session_state, message):
     if session_state.do_evaluation:
         await display_evaluation_results(cl, session_state)
     else:
+        await cl.Message(content="**Simulation Complete**").send()
         evaluate_actions = [
             cl.Action(name="Evaluate Performance", value="evaluate", description="Evaluate Performance"),
             cl.Action(name="Display Queries and Responses", value="display_llm_responses", description="Display LLM Responses")
         ]
-        await cl.Message(content="Click to evaluate", actions=evaluate_actions).send()
+        await cl.Message(content="Click to evaluate performance", actions=evaluate_actions).send()
+        await cl.Message(content="\n\n").send()
utils_voice.py CHANGED
@@ -17,7 +17,7 @@ async def reply_with_voice(cl,client, assistant_message):
         elements = [
             cl.Audio(name="Voice", path=speech_file_path, display="inline")
         ]
-        await cl.Message(content=assistant_message, elements=elements).send()
+        await cl.Message(content=assistant_message, elements=elements, author="John Smith").send()
     except Exception as e:
         await cl.Message(content=f"Error generating or sending audio: {e}").send()
     finally:
  finally: