Includes deal analysis

Files changed:

- .env +2 -1
- __pycache__/app.cpython-311.pyc +0 -0
- __pycache__/classes.cpython-311.pyc +0 -0
- __pycache__/utils_callbacks.cpython-311.pyc +0 -0
- __pycache__/utils_customer_research.cpython-311.pyc +0 -0
- __pycache__/utils_opportunity_review.cpython-311.pyc +0 -0
- __pycache__/utils_output.cpython-311.pyc +0 -0
- __pycache__/utils_prep.cpython-311.pyc +0 -0
- app.py +1 -0
- classes.py +6 -2
- data/HSBC Opportunity Information.docx +0 -0
- reports/HSBC Opportunity Review Report.md +26 -0
- requirements.txt +2 -1
- utils_callbacks.py +0 -2
- utils_customer_research.py +2 -1
- utils_opportunity_review.py +355 -0
- utils_prep.py +30 -19
.env
CHANGED
@@ -1,2 +1,3 @@
 OPENAI_API_KEY="sk-FG-bvK9Xs1xfayztqm-POKfx4IlONcg488PnXTmCDvT3BlbkFJeVuGgSO0YQIQS7i5JrqfcZ0hrUyEylpaUQanJ0hawA"
-TAVILY_API_KEY="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"
+TAVILY_API_KEY="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"
+LANGCHAIN_API_KEY="lsck_01H3333333333333333333333333333333"

__pycache__/app.cpython-311.pyc
CHANGED
Binary files a/__pycache__/app.cpython-311.pyc and b/__pycache__/app.cpython-311.pyc differ

__pycache__/classes.cpython-311.pyc
CHANGED
Binary files a/__pycache__/classes.cpython-311.pyc and b/__pycache__/classes.cpython-311.pyc differ

__pycache__/utils_callbacks.cpython-311.pyc
CHANGED
Binary files a/__pycache__/utils_callbacks.cpython-311.pyc and b/__pycache__/utils_callbacks.cpython-311.pyc differ

__pycache__/utils_customer_research.cpython-311.pyc
CHANGED
Binary files a/__pycache__/utils_customer_research.cpython-311.pyc and b/__pycache__/utils_customer_research.cpython-311.pyc differ

__pycache__/utils_opportunity_review.cpython-311.pyc
ADDED
Binary file (23 kB)

__pycache__/utils_output.cpython-311.pyc
CHANGED
Binary files a/__pycache__/utils_output.cpython-311.pyc and b/__pycache__/utils_output.cpython-311.pyc differ

__pycache__/utils_prep.cpython-311.pyc
CHANGED
Binary files a/__pycache__/utils_prep.cpython-311.pyc and b/__pycache__/utils_prep.cpython-311.pyc differ

app.py
CHANGED
@@ -21,6 +21,7 @@ from utils_simulation import do_simulation
 dotenv.load_dotenv()
 openai_api_key = os.getenv("OPENAI_API_KEY")
 tavily_api_key = os.getenv("TAVILY_API_KEY")
+langchain_api_key = os.getenv("LANGCHAIN_API_KEY")
 
 llm_model = "gpt-4o-mini"
 set_llm_cache(InMemoryCache())

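Note on the new environment variable: this commit only reads LANGCHAIN_API_KEY into a variable; nothing shown in the diff consumes it yet. If the intent is LangSmith tracing, a minimal sketch (an assumption, not something in this commit) would set the standard tracing variables before any chains run:

```python
import os

# Sketch only: assumes LANGCHAIN_API_KEY is meant for LangSmith tracing.
# "deal-analysis" is a hypothetical project name.
if os.getenv("LANGCHAIN_API_KEY"):
    os.environ["LANGCHAIN_TRACING_V2"] = "true"
    os.environ["LANGCHAIN_PROJECT"] = "deal-analysis"
```
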
classes.py
CHANGED
@@ -1,7 +1,7 @@
 class SessionState:
     session_stage = ""
     do_evaluation = False
-    do_opportunity_analysis = False
+    do_opportunity_analysis = True
     do_customer_research = False
     do_objections = False
     add_objections_to_analysis = True
@@ -33,10 +33,12 @@ class SessionState:
     command = ""
     scores = []
     llm_next_steps = ""
+    opportunity_review_results = None
+    opportunity_review_report = None
     def __init__(self):
         self.session_stage = "research"
         self.do_evaluation = False
-        self.do_opportunity_analysis = False
+        self.do_opportunity_analysis = True
         self.do_customer_research = True
         self.do_objections = False
         self.add_objections_to_analysis = False
@@ -72,6 +74,8 @@ class SessionState:
         self.customer = None
         self.opportunity = None
         self.scenario = None
+        self.opportunity_review_results = None
+        self.opportunity_review_report = None
 class Company:
     def __init__(self, name, description, product, product_summary, product_description):
         self.name = name

data/HSBC Opportunity Information.docx
ADDED
Binary file (292 kB)

reports/HSBC Opportunity Review Report.md
ADDED
@@ -0,0 +1,26 @@
+**Analysis Results**
+
+**Summary:** The opportunity with HSBC involves offering a new analytics engine for their loan origination system, which is currently slow and inflexible. The urgency for decision-making stems from their upcoming renewal with the existing vendor. Multiple meetings have taken place, including an initial discovery call, a technical deep dive, and a proposal review, with HSBC expressing interest in moving forward. The next step involves further discussions with John Smith, the VP of Information Technology, to potentially proceed to a pilot program or final negotiations.
+
+**Score:** 75
+
+**MEDDIC Evaluation:**
+
+**Metrics:** 80
+**Evaluation:** John Smith appears to be a potential champion, given his active involvement. However, it is uncertain how strongly he advocates for the solution internally or if there are other champions within HSBC. Further development of internal champions may be needed.
+**Economic Buyer:** 70
+**Decision Criteria:** 75
+**Decision Process:** 70
+**Identify Pain:** 85
+**Champion:** 65
+
+
+**Next Steps**
+
+Schedule a meeting with John Smith to deepen the understanding of HSBC's decision-making process and to identify additional internal champions. Aim to set the groundwork for a pilot program or final negotiations.
+
+**Talking Points:**
+
+1. **Reinforce the Value Proposition**: Highlight how the new analytics engine will enhance HSBC's loan origination system by improving speed and flexibility, directly addressing the current pain points
+2. **Discuss the Timeline and Urgency**: Emphasize the importance of aligning with HSBC's renewal timeline to ensure a smooth transition from their existing vendor, and address any potential roadblocks in the decision-making process
+3. **Identify and Develop Additional Champions**: Explore who else within HSBC might benefit from or support the new solution, and strategize on how to engage these stakeholders to strengthen internal advocacy

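The report does not state how the overall score of 75 is derived from the six criterion scores; the prompt in utils_opportunity_review.py leaves that to the model. For orientation, a plain unweighted average of the criterion scores lands in the same range:

```python
# Rough cross-check only; the actual overall score comes free-form from the LLM.
criterion_scores = {"Metrics": 80, "Economic Buyer": 70, "Decision Criteria": 75,
                    "Decision Process": 70, "Identify Pain": 85, "Champion": 65}
print(sum(criterion_scores.values()) / len(criterion_scores))  # ~74.2, close to the reported 75
```
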
requirements.txt
CHANGED
@@ -52,4 +52,5 @@ openai-whisper==20240930
 # Nitins dependencies
 fpdf==1.7.2
 tavily-python==0.5.0
-langgraph==0.2.34
+langgraph==0.2.34
+docx2txt==0.8

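docx2txt is pulled in because the new utils_opportunity_review.py loads the .docx briefing through Docx2txtLoader. A minimal smoke test of that dependency, using the document path from this commit, might look like:

```python
from langchain.document_loaders import Docx2txtLoader  # backed by the new docx2txt dependency

docs = Docx2txtLoader("data/HSBC Opportunity Information.docx").load()
print(len(docs), docs[0].page_content[:200])
```
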
utils_callbacks.py
CHANGED
@@ -65,8 +65,6 @@ async def callback_run_scenario(action):
     ]
     await cl.Message(content="Click to start simulation", actions=start_actions).send()
 
-
-
 async def callback_start_scenario():
     print("callback_start_scenario()")
     session_state = cl.user_session.get("session_state", None)

utils_customer_research.py
CHANGED
@@ -334,4 +334,5 @@ async def generete_pdf(state: ResearchState):
 
     msg = generate_pdf_from_md(state['report'], filename=pdf_file_path)
 
-    return {"messages": [AIMessage(content=msg)]}
+    return {"messages": [AIMessage(content=msg)]}
+

utils_opportunity_review.py
ADDED
@@ -0,0 +1,355 @@
+import json
+import os
+from langchain.document_loaders import CSVLoader, PyPDFLoader, Docx2txtLoader
+from langgraph.graph import StateGraph, END
+from langchain.prompts import PromptTemplate
+from langchain.schema import Document, AIMessage
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain_openai import ChatOpenAI, OpenAIEmbeddings
+from pathlib import Path
+from pydantic import BaseModel, Field
+from qdrant_client import QdrantClient
+from qdrant_client.models import Distance, VectorParams, PointStruct
+from typing import List, Dict, Any
+
+from pydantic import BaseModel, Field
+from typing import Dict, Any
+
+
+llm = ChatOpenAI(model_name="gpt-4o")
+embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
+qdrant = QdrantClient(":memory:")  # In-memory Qdrant instance
+
+# Create collection
+qdrant.create_collection(
+    collection_name="opportunities",
+    vectors_config=VectorParams(size=1536, distance=Distance.COSINE),
+)
+
+class State(BaseModel):
+    file_path: str
+    document_processed: str = ""
+    opportunity_evaluation: Dict[str, Any] = Field(default_factory=dict)
+    next_action: Dict[str, Any] = Field(default_factory=dict)
+
+    def dict_representation(self) -> Dict[str, Any]:
+        return {
+            "file_path": self.file_path,
+            "document_processed": self.document_processed,
+            "opportunity_evaluation": self.opportunity_evaluation,
+            "next_action": self.next_action
+        }
+
+async def prep_opportunity_review(session_state):
+    file_path = prep_document()
+    structured_results = run_analysis(file_path)
+    opportunity_review_report = create_opportunity_review_report(structured_results)
+    session_state.opportunity_review_results = structured_results
+    session_state.opportunity_review_report = opportunity_review_report
+
+
+def prep_document():
+    file_path = "data/HSBC Opportunity Information.docx"
+    path = Path(file_path)
+
+    if path.exists():
+        if path.is_file():
+            print(f"File found: {path}")
+            print(f"File size: {path.stat().st_size / 1024:.2f} KB")
+            print(f"Last modified: {path.stat().st_mtime}")
+            print("File is ready for processing.")
+            if os.access(path, os.R_OK):
+                print("File is readable.")
+            else:
+                print("Warning: File exists but may not be readable. Check permissions.")
+        else:
+            print(f"Error: {path} exists but is not a file. It might be a directory.")
+    else:
+        print(f"Error: File not found at {path}")
+        print("Please check the following:")
+        print("1. Ensure the file path is correct.")
+        print("2. Verify that the file exists in the specified location.")
+        print("3. Check if you have the necessary permissions to access the file.")
+
+        parent = path.parent
+        if not parent.exists():
+            print(f"Note: The directory {parent} does not exist.")
+        elif not parent.is_dir():
+            print(f"Note: {parent} exists but is not a directory.")
+
+    file_path_for_processing = str(path)
+    return file_path_for_processing
+
+def load_and_chunk_document(file_path: str) -> List[Document]:
+    """Load and chunk the document based on file type."""
+    if not os.path.exists(file_path):
+        raise FileNotFoundError(f"File not found: {file_path}")
+
+    _, file_extension = os.path.splitext(file_path.lower())
+
+    if file_extension == '.csv':
+        loader = CSVLoader(file_path)
+    elif file_extension == '.pdf':
+        loader = PyPDFLoader(file_path)
+    elif file_extension == '.docx':
+        loader = Docx2txtLoader(file_path)
+    else:
+        raise ValueError(f"Unsupported file type: {file_extension}")
+
+    documents = loader.load()
+    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
+    return text_splitter.split_documents(documents)
+
+def agent_1(file_path: str) -> str:
+    """Agent 1: Load, chunk, embed, and store document in Qdrant."""
+    try:
+        chunks = load_and_chunk_document(file_path)
+        points = []
+        for i, chunk in enumerate(chunks):
+            vector = embeddings.embed_query(chunk.page_content)
+            points.append(PointStruct(id=i, vector=vector, payload={"text": chunk.page_content}))
+
+        qdrant.upsert(
+            collection_name="opportunities",
+            points=points
+        )
+        return f"Document processed and stored in Qdrant. {len(chunks)} chunks created."
+    except Exception as e:
+        print(f"Error in agent_1: {str(e)}")
+        return f"Error processing document: {str(e)}"
+
+def agent_2() -> Dict[str, Any]:
+    """Agent 2: Evaluate opportunity based on MEDDIC criteria."""
+    try:
+        results = qdrant.scroll(collection_name="opportunities", limit=100)
+        if not results or len(results[0]) == 0:
+            raise ValueError("No documents found in Qdrant")
+
+        full_text = " ".join([point.payload.get("text", "") for point in results[0]])
+
+        meddic_template = """
+        Analyze the following opportunity information using the MEDDIC sales methodology:
+
+        {opportunity_info}
+
+        Assign an overall opportunity score (1-100) with 100 means that the opportunity is a sure win.
+
+        Provide a Summary of the opportunity.
+
+        Evaluate the opportunity based on each MEDDIC criterion and assign a score for each criterion:
+        1. Metrics
+        2. Economic Buyer
+        3. Decision Criteria
+        4. Decision Process
+        5. Identify Pain
+        6. Champion
+
+        Format your response as follows:
+        Summary: [Opportunity Summary]
+        Score: [Overall Opportunity Score between 1 to 100 based on MEDDIC criteria]
+        MEDDIC Evaluation:
+        - Metrics: [Score on Metrics, Evaluation on Metrics criterion]
+        - Economic Buyer: [Score on Economic Buyer, Evaluation on Economic Buyer criterion]
+        - Decision Criteria: [Score on Decision Criteria, Evaluation on Decision Criteria criterion]
+        - Decision Process: [Score on Decision Process, Evaluation on Decision Process criterion]
+        - Identify Pain: [Score on Identify Pain, Evaluation on Identify Pain criterion]
+        - Champion: [Score on Champion, Evaluation on Champion criterion]
+        """
+
+        meddic_prompt = PromptTemplate(template=meddic_template, input_variables=["opportunity_info"])
+        meddic_chain = meddic_prompt | llm
+
+        response = meddic_chain.invoke({"opportunity_info": full_text})
+
+        if isinstance(response, AIMessage):
+            response_content = response.content
+        elif isinstance(response, str):
+            response_content = response
+        else:
+            raise ValueError(f"Unexpected response type: {type(response)}")
+
+        # Parse the response content
+        lines = response_content.split('\n')
+        summary = next((line.split('Summary:')[1].strip() for line in lines if line.startswith('Summary:')), 'N/A')
+        score = next((int(line.split('Score:')[1].strip()) for line in lines if line.startswith('Score:')), 0)
+        meddic_eval = {}
+        current_criterion = None
+        for line in lines:
+            if line.strip().startswith('-'):
+                parts = line.split(':', 1)
+                if len(parts) == 2:
+                    current_criterion = parts[0].strip('- ')
+                    meddic_eval[current_criterion] = parts[1].strip()
+            elif current_criterion and line.strip():
+                meddic_eval[current_criterion] += ' ' + line.strip()
+
+        return {
+            'summary': summary,
+            'score': score,
+            'meddic_evaluation': meddic_eval
+        }
+
+    except Exception as e:
+        print(f"Error in agent_2: {str(e)}")
+        return {
+            'summary': "Error occurred during evaluation",
+            'score': 0,
+            'meddic_evaluation': str(e)
+        }
+
+def agent_3(meddic_evaluation: Dict[str, Any]) -> Dict[str, Any]:
+    """Agent 3: Suggest next best action and talking points."""
+    try:
+        next_action_template = """
+        Based on the following MEDDIC evaluation of an opportunity:
+
+        {meddic_evaluation}
+
+        Suggest the next best action for the upcoming customer meeting and provide the top 3 talking points.
+        Format your response as follows:
+        Next Action: [Your suggested action]
+        Talking Points:
+        1. [First talking point]
+        2. [Second talking point]
+        3. [Third talking point]
+        """
+
+        next_action_prompt = PromptTemplate(template=next_action_template, input_variables=["meddic_evaluation"])
+        next_action_chain = next_action_prompt | llm
+
+        response = next_action_chain.invoke({"meddic_evaluation": json.dumps(meddic_evaluation)})
+
+        if isinstance(response, AIMessage):
+            response_content = response.content
+        elif isinstance(response, str):
+            response_content = response
+        else:
+            raise ValueError(f"Unexpected response type: {type(response)}")
+
+        # Parse the response content
+        lines = response_content.split('\n')
+        next_action = next((line.split('Next Action:')[1].strip() for line in lines if line.startswith('Next Action:')), 'N/A')
+        talking_points = [line.split('.')[1].strip() for line in lines if line.strip().startswith(('1.', '2.', '3.'))]
+
+        return {
+            'next_action': next_action,
+            'talking_points': talking_points
+        }
+    except Exception as e:
+        print(f"Error in agent_3: {str(e)}")
+        return {
+            'next_action': "Error occurred while suggesting next action",
+            'talking_points': [str(e)]
+        }
+
+def process_document(state: State) -> State:
+    print("Agent 1: Processing document...")
+    file_path = state.file_path
+    result = agent_1(file_path)
+    return State(file_path=state.file_path, document_processed=result)
+
+def evaluate_opportunity(state: State) -> State:
+    print("Agent 2: Evaluating opportunity...")
+    result = agent_2()
+    return State(file_path=state.file_path, document_processed=state.document_processed, opportunity_evaluation=result)
+
+def suggest_next_action(state: State) -> State:
+    print("Agent 3: Suggesting next actions...")
+    result = agent_3(state.opportunity_evaluation)
+    return State(file_path=state.file_path, document_processed=state.document_processed, opportunity_evaluation=state.opportunity_evaluation, next_action=result)
+
+def define_graph() -> StateGraph:
+    workflow = StateGraph(State)
+
+    workflow.add_node("process_document", process_document)
+    workflow.add_node("evaluate_opportunity", evaluate_opportunity)
+    workflow.add_node("suggest_next_action", suggest_next_action)
+
+    workflow.set_entry_point("process_document")
+    workflow.add_edge("process_document", "evaluate_opportunity")
+    workflow.add_edge("evaluate_opportunity", "suggest_next_action")
+
+    return workflow
+
+
+def run_analysis(file_path: str) -> Dict[str, Any]:
+    if not os.path.exists(file_path):
+        return {"error": f"File not found: {file_path}"}
+
+    graph = define_graph()
+    initial_state = State(file_path=file_path)
+
+    try:
+        app = graph.compile()
+        final_state = app.invoke(initial_state)
+
+        # Convert the final state to a dictionary manually
+        structured_results = {
+            "file_path": final_state["file_path"],
+            "document_processed": final_state["document_processed"],
+            "opportunity_evaluation": final_state["opportunity_evaluation"],
+            "next_action": final_state["next_action"]
+        }
+
+        # Print a summary of the results
+        print("\n--- Analysis Results ---")
+        print(f"Document Processing: {'Successful' if 'Error' not in structured_results['document_processed'] else 'Failed'}")
+        print(f"Details: {structured_results['document_processed']}")
+
+        if isinstance(structured_results['opportunity_evaluation'], dict):
+            print("\nOpportunity Evaluation:")
+            print(f"Summary: {structured_results['opportunity_evaluation'].get('summary', 'N/A')}")
+            print(f"Score: {structured_results['opportunity_evaluation'].get('score', 'N/A')}")
+            print("MEDDIC Evaluation:")
+            for criterion, evaluation in structured_results['opportunity_evaluation'].get('meddic_evaluation', {}).items():
+                print(f"{criterion}: {evaluation}")
+        else:
+            print("\nOpportunity Evaluation:")
+            print(f"Error: {structured_results['opportunity_evaluation']}")
+
+        if isinstance(structured_results['next_action'], dict):
+            print("\nNext Action:")
+            print(f"Action: {structured_results['next_action'].get('next_action', 'N/A')}")
+            print("Talking Points:")
+            for i, point in enumerate(structured_results['next_action'].get('talking_points', []), 1):
+                print(f"  {i}. {point}")
+        else:
+            print("\nNext Action:")
+            print(f"Error: {structured_results['next_action']}")
+
+        return structured_results
+
+    except Exception as e:
+        print(f"An error occurred during analysis: {str(e)}")
+        return {"error": str(e)}
+
+def create_opportunity_review_report(structured_results):
+    opportunity_review_report = ""
+    opportunity_review_report += "**Analysis Results**\n\n"
+    if 'Error' in structured_results['document_processed']:
+        opportunity_review_report += f"Opportunity Analysis Failed\n"
+
+    else:
+        if isinstance(structured_results['opportunity_evaluation'], dict):
+            opportunity_review_report += f"**Summary:** {structured_results['opportunity_evaluation'].get('summary', 'N/A')}\n\n"
+            opportunity_review_report += f"**Score:** {structured_results['opportunity_evaluation'].get('score', 'N/A')}\n\n"
+            opportunity_review_report += "**MEDDIC Evaluation:**\n\n"
+            for criterion, evaluation in structured_results['opportunity_evaluation'].get('meddic_evaluation', {}).items():
+                opportunity_review_report += f"**{criterion}:** {evaluation}\n"
+
+        if isinstance(structured_results['next_action'], dict):
+            opportunity_review_report += "\n\n**Next Steps**\n\n"
+            opportunity_review_report += f"{structured_results['next_action'].get('next_action', 'N/A')}\n\n"
+            opportunity_review_report += "**Talking Points:**\n\n"
+            for i, point in enumerate(structured_results['next_action'].get('talking_points', []), 1):
+                opportunity_review_report += f"  {i}. {point}\n"
+    file_path = "reports/HSBC Opportunity Review Report.md"
+    save_md_file(file_path, opportunity_review_report)
+    return opportunity_review_report
+
+def save_md_file(file_path, file_content):
+    if os.path.exists(file_path):
+        os.remove(file_path)
+        print(f"Existing file deleted: {file_path}")
+    with open(file_path, 'w', encoding='utf-8') as md_file:
+        md_file.write(file_content)

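The new module can also be exercised outside the Chainlit session, assuming OPENAI_API_KEY is set and the briefing document is in place; this mirrors what prep_opportunity_review does internally:

```python
from utils_opportunity_review import run_analysis, create_opportunity_review_report

results = run_analysis("data/HSBC Opportunity Information.docx")
if "error" not in results:
    # Also writes reports/HSBC Opportunity Review Report.md as a side effect.
    print(create_opportunity_review_report(results))
else:
    print(results["error"])
```
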
utils_prep.py
CHANGED
@@ -1,12 +1,14 @@
 import asyncio
 import chainlit as cl
+import os
 from langchain_openai import ChatOpenAI
 
 from utils_actions import offer_actions,offer_initial_actions
+from utils_customer_research import read_markdown_file
 from utils_data import get_company_data, get_opportunities
-from utils_prompt import get_chat_prompt
 from utils_objections import create_objections
-
+from utils_opportunity_review import prep_opportunity_review
+from utils_prompt import get_chat_prompt
 
 async def prep_start(session_state):
 
@@ -64,7 +66,11 @@ async def prep_opportunity_analysis():
     await cl.Message(content=opportunity_analysis_message).send()
 
     if session_state.do_opportunity_analysis:
-        pass
+        agent_1_message = "*Retrieving and reviewing opportunity data from SalesForce CRM ...*"
+        await cl.Message(content=agent_1_message).send()
+        await prep_opportunity_review(session_state)
+        report = session_state.opportunity_review_report
+        await cl.Message(content=report).send()
     else:
 
         agent_1_message = "*Retrieving data from SalesForce CRM ...*"
@@ -85,24 +91,29 @@
         await asyncio.sleep(1)
         output_message = "**Analysis Results**"
         await cl.Message(content=output_message).send()
-        output_messages = get_opportunity_analysis()
-        for output_message in output_messages:
-            await cl.Message(content=output_message).send()
-        await cl.Message(content="").send()
 
-
-
-        await cl.Message(content=
-
-
-
-
-
-
-
+        markdown_file_path = "reports/HSBC Opportunity Review Report.md"
+        if os.path.exists(markdown_file_path):
+            await cl.Message(content=read_markdown_file(markdown_file_path)).send()
+        else:
+            output_messages = get_opportunity_analysis()
+            for output_message in output_messages:
+                await cl.Message(content=output_message).send()
+            await cl.Message(content="").send()
+
+        if session_state.add_objections_to_analysis:
+            output_message = "**Risks**"
+            await cl.Message(content=output_message).send()
+            for obj in session_state.objections:
+                await cl.Message(content=obj).send()
+
+        output_message = "**Next Steps**"
         await cl.Message(content=output_message).send()
-
-
+        output_messages = get_next_steps()
+        for output_message in output_messages:
+            await cl.Message(content=output_message).send()
+        await cl.Message(content="").send()
+        await cl.Message(content="\n\n").send()
 
         await offer_actions()
 
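read_markdown_file is imported from utils_customer_research, whose implementation is not part of this diff (the utils_customer_research change above only adds a trailing newline). Presumably it is a thin file-read helper along these lines (a guess, not the actual implementation):

```python
def read_markdown_file(path: str) -> str:
    # Hypothetical stand-in for utils_customer_research.read_markdown_file.
    with open(path, "r", encoding="utf-8") as f:
        return f.read()
```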