SalesAI / app.py
Zasha1's picture
Update app.py
a916880 verified
raw
history blame
14 kB
import speech_recognition as sr
from sentiment_analysis import analyze_sentiment, transcribe_with_chunks
from product_recommender import ProductRecommender
from objection_handler import ObjectionHandler
from google_sheets import fetch_call_data, store_data_in_sheet
from sentence_transformers import SentenceTransformer
from env_setup import config
import re
import uuid
import pandas as pd
import plotly.express as px
import streamlit as st
import numpy as np
from io import BytesIO
import wave
import threading
import queue
from streamlit_webrtc import webrtc_streamer, WebRtcMode, AudioProcessorBase
# Initialize components
# NOTE(review): these run at import time — they read two CSV files from the
# working directory and download/load a sentence-transformer model, which is
# a heavy side effect for module import.
objection_handler = ObjectionHandler("objections.csv")
product_recommender = ProductRecommender("recommendations.csv")
model = SentenceTransformer('all-MiniLM-L6-v2')

# Queue to hold transcribed text: produced by AudioProcessor.recv on the
# WebRTC worker side, drained by real_time_analysis() on the UI side.
transcription_queue = queue.Queue()
def generate_comprehensive_summary(chunks):
    """Build a human-readable, multi-line summary of a transcribed call.

    Parameters
    ----------
    chunks : sequence of tuples whose first element is the chunk text and
        whose second element is a sentiment label ('POSITIVE', 'NEGATIVE'
        or 'NEUTRAL'). Extra tuple elements are ignored.

    Returns
    -------
    str
        Summary covering detected themes, sentiment counts, up to three
        key conversation points, and an overall outcome line.
    """
    # Lowercase once up front instead of per keyword comparison.
    full_text_lower = " ".join(chunk[0] for chunk in chunks).lower()
    sentiments = [chunk[1] for chunk in chunks]

    # Keyword buckets used to detect high-level conversation themes.
    context_keywords = {
        'product_inquiry': ['dress', 'product', 'price', 'stock'],
        'pricing': ['cost', 'price', 'budget'],
        'negotiation': ['installment', 'payment', 'manage'],
    }
    themes = [
        theme for theme, keywords in context_keywords.items()
        if any(keyword in full_text_lower for keyword in keywords)
    ]

    positive_count = sentiments.count('POSITIVE')
    negative_count = sentiments.count('NEGATIVE')
    neutral_count = sentiments.count('NEUTRAL')

    # Chunks that mention commercially important terms become "key points".
    key_terms = ('price', 'dress', 'stock', 'installment')
    key_interactions = [
        chunk[0] for chunk in chunks
        if any(term in chunk[0].lower() for term in key_terms)
    ]

    # Assemble the summary via join (avoids quadratic += concatenation).
    parts = ["Conversation Summary:\n"]
    if 'product_inquiry' in themes:
        parts.append("• Customer initiated a product inquiry about items.\n")
    if 'pricing' in themes:
        parts.append("• Price and budget considerations were discussed.\n")
    if 'negotiation' in themes:
        parts.append("• Customer and seller explored flexible payment options.\n")

    parts.append("\nConversation Sentiment:\n")
    parts.append(f"• Positive Interactions: {positive_count}\n")
    parts.append(f"• Negative Interactions: {negative_count}\n")
    parts.append(f"• Neutral Interactions: {neutral_count}\n")

    parts.append("\nKey Conversation Points:\n")
    # Cap at the first three key points to keep the summary compact.
    for interaction in key_interactions[:3]:
        parts.append(f"• {interaction}\n")

    if positive_count > negative_count:
        parts.append("\nOutcome: Constructive and potentially successful interaction.")
    elif negative_count > positive_count:
        parts.append("\nOutcome: Interaction may require further follow-up.")
    else:
        parts.append("\nOutcome: Neutral interaction with potential for future engagement.")

    return "".join(parts)
def is_valid_input(text):
    """Return True when *text* (stripped) is at least three characters and
    consists only of ASCII letters and whitespace."""
    cleaned = text.strip().lower()
    # fullmatch is equivalent to the anchored ^...$ pattern here because the
    # input has already been stripped of trailing newlines.
    return len(cleaned) >= 3 and re.fullmatch(r'[a-zA-Z\s]*', cleaned) is not None
def is_relevant_sentiment(sentiment_score):
    """A score strictly above 0.4 is considered strong enough to act on."""
    threshold = 0.4
    return sentiment_score > threshold
def calculate_overall_sentiment(sentiment_scores):
    """Map the mean of *sentiment_scores* onto a coarse label.

    Returns 'POSITIVE' for a positive mean, 'NEGATIVE' for a negative
    mean, and 'NEUTRAL' for a zero mean or an empty score list.
    """
    if not sentiment_scores:
        return "NEUTRAL"
    mean_score = sum(sentiment_scores) / len(sentiment_scores)
    if mean_score > 0:
        return "POSITIVE"
    if mean_score < 0:
        return "NEGATIVE"
    return "NEUTRAL"
def handle_objection(text):
    """Return the canned response(s) for the objection nearest to *text*,
    or a fixed fallback string when nothing matches closely enough."""
    embedding = model.encode([text])
    distances, _indices = objection_handler.index.search(embedding, 1)
    # Guard clause: nearest neighbour too far away → no usable match.
    if distances[0][0] >= 1.5:
        return "No objection response found."
    responses = objection_handler.handle_objection(text)
    if responses:
        return "\n".join(responses)
    return "No objection response found."
def transcribe_audio(audio_bytes, sample_rate=16000):
    """Wrap raw PCM sample bytes in an in-memory WAV container and
    transcribe them.

    Returns the text of the last transcribed chunk, or None when
    transcription yields nothing or raises.
    """
    try:
        with BytesIO() as wav_buffer:
            # Build a mono, 16-bit WAV file around the raw samples.
            with wave.open(wav_buffer, 'wb') as wf:
                wf.setnchannels(1)
                wf.setsampwidth(2)  # 2 bytes per sample == 16-bit PCM
                wf.setframerate(sample_rate)
                wf.writeframes(audio_bytes)
            st.write("Audio saved, attempting transcription...")
            # NOTE(review): the whole WAV byte string is passed in — confirm
            # transcribe_with_chunks accepts bytes rather than a file path.
            chunks = transcribe_with_chunks(wav_buffer.getvalue())
            if chunks:
                st.write(f"Transcribed chunks: {chunks}")
                # Each chunk is a (text, ...) tuple; return the latest text.
                return chunks[-1][0]
    except Exception as e:
        st.error(f"Error transcribing audio: {e}")
    return None
class AudioProcessor(AudioProcessorBase):
    """streamlit-webrtc audio callback: transcribes each incoming frame and
    pushes recognised text onto the shared module-level queue."""

    def __init__(self):
        # Shared queue; drained by real_time_analysis() on the UI side.
        self.transcription_queue = transcription_queue

    def recv(self, frame):
        """Handle one incoming audio frame; always returns it unchanged."""
        audio_data = frame.to_ndarray()
        st.write(f"Received audio frame: {audio_data.shape}")
        # NOTE(review): scaling by 32767 assumes float samples in [-1, 1];
        # av audio frames are frequently already int16, in which case this
        # would clip/garble the signal — confirm the frame format.
        audio_bytes = (audio_data * 32767).astype(np.int16).tobytes()
        text = transcribe_audio(audio_bytes)
        if text:
            st.write(f"Transcribed text: {text}")
            self.transcription_queue.put(text)
        return frame
def real_time_analysis():
    """Stream microphone audio via WebRTC and analyse transcriptions live:
    sentiment, objection handling, and product recommendations."""
    st.info("Listening... Say 'stop' to end the process.")
    webrtc_ctx = webrtc_streamer(
        key="real-time-audio",
        mode=WebRtcMode.SENDONLY,  # audio flows browser -> server only
        audio_processor_factory=AudioProcessor,
        media_stream_constraints={"audio": True, "video": False},
    )
    if webrtc_ctx.state.playing:
        # NOTE(review): this drains only what is queued at the moment of the
        # current Streamlit rerun; transcriptions arriving afterwards are not
        # shown until the next rerun.
        while not transcription_queue.empty():
            text = transcription_queue.get()
            st.write(f"*Recognized Text:* {text}")
            sentiment, score = analyze_sentiment(text)
            st.write(f"*Sentiment:* {sentiment} (Score: {score})")
            objection_response = handle_objection(text)
            st.write(f"*Objection Response:* {objection_response}")
            recommendations = []
            # Only query the recommender for meaningful input with a
            # sufficiently strong sentiment score.
            if is_valid_input(text) and is_relevant_sentiment(score):
                query_embedding = model.encode([text])
                distances, indices = product_recommender.index.search(query_embedding, 1)
                # Same nearest-neighbour distance gate as handle_objection.
                if distances[0][0] < 1.5:
                    recommendations = product_recommender.get_recommendations(text)
            if recommendations:
                st.write("*Product Recommendations:*")
                for rec in recommendations:
                    st.write(rec)
def run_app():
    """Streamlit entry point: configures the page, injects the CSS theme,
    and routes between the real-time analysis view and the dashboard."""
    st.set_page_config(page_title="Sales Call Assistant", layout="wide")
    st.title("AI Sales Call Assistant")
    # Global CSS theme for the whole app (header card, sections, buttons,
    # tabs, and alert boxes).
    st.markdown("""
<style>
html, body {
font-family: 'Roboto', sans-serif;
background-color: #f5f7fa;
}
.header-container {
background: linear-gradient(135deg, #2980b9, #6dd5fa, #ffffff);
padding: 20px;
border-radius: 15px;
margin-bottom: 30px;
text-align: center;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
.section {
background: linear-gradient(135deg, #ffffff, #f5f7fa);
padding: 25px;
border-radius: 15px;
margin-bottom: 30px;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
.header {
font-size: 2.5em;
font-weight: 800;
color: #2980b9;
margin: 0;
padding: 10px;
letter-spacing: 1px;
}
.subheader {
font-size: 1.8em;
font-weight: 600;
color: #2980b9;
margin-top: 20px;
margin-bottom: 10px;
text-align: left;
}
.table-container {
background: #ffffff;
padding: 20px;
border-radius: 10px;
margin: 20px 0;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}
.stButton > button {
background: linear-gradient(135deg, #2980b9, #6dd5fa);
color: white;
border: none;
padding: 10px 20px;
border-radius: 5px;
transition: all 0.3s ease;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}
.stButton > button:hover {
background: linear-gradient(135deg, #2396dc, #6dd5fa);
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.2);
}
.stTabs [data-baseweb="tab-list"] {
gap: 24px;
background: #f5f7fa;
padding: 10px;
border-radius: 10px;
}
.stTabs [data-baseweb="tab"] {
background-color: transparent;
border-radius: 4px;
color: #2980b9;
font-weight: 600;
padding: 10px 16px;
}
.stTabs [aria-selected="true"] {
background: linear-gradient(120deg, #2980b9, #6dd5fa);
color: white;
}
.success {
background: linear-gradient(135deg, #43A047, #2E7D32);
color: white;
padding: 10px;
border-radius: 5px;
margin: 10px 0;
}
.error {
background: linear-gradient(135deg, #E53935, #C62828);
color: white;
padding: 10px;
border-radius: 5px;
margin: 10px 0;
}
.warning {
background: linear-gradient(135deg, #FB8C00, #F57C00);
color: white;
padding: 10px;
border-radius: 5px;
margin: 10px 0;
}
</style>
""", unsafe_allow_html=True)
    # Styled page header banner.
    st.markdown("""
<div class="header-container">
<h1 class="header">AI Sales Call Assistant</h1>
</div>
""", unsafe_allow_html=True)
    st.sidebar.title("Navigation")
    app_mode = st.sidebar.radio("Choose a mode:", ["Real-Time Call Analysis", "Dashboard"])
    if app_mode == "Real-Time Call Analysis":
        st.markdown('<div class="section">', unsafe_allow_html=True)
        st.header("Real-Time Sales Call Analysis")
        st.markdown('</div>', unsafe_allow_html=True)
        if st.button("Start Listening"):
            real_time_analysis()
    elif app_mode == "Dashboard":
        st.markdown('<div class="section">', unsafe_allow_html=True)
        st.header("Call Summaries and Sentiment Analysis")
        try:
            # Fetch all rows previously stored by the analysis pipeline.
            data = fetch_call_data(config["google_sheet_id"])
            if data.empty:
                st.warning("No data available in the Google Sheet.")
            else:
                sentiment_counts = data['Sentiment'].value_counts()
                # NOTE(review): filter_product_mentions and product_titles
                # are not defined anywhere in this file — this line raises
                # NameError at runtime, which is swallowed by the broad
                # except below and surfaced as "Error loading dashboard".
                product_mentions = filter_product_mentions(data[['Chunk']].values.tolist(), product_titles)
                product_mentions_df = pd.DataFrame(list(product_mentions.items()), columns=['Product', 'Count'])
                col1, col2 = st.columns(2)
                with col1:
                    st.subheader("Sentiment Distribution")
                    # Bar chart: number of call chunks per sentiment label.
                    fig_bar = px.bar(
                        x=sentiment_counts.index,
                        y=sentiment_counts.values,
                        title='Number of Calls by Sentiment',
                        labels={'x': 'Sentiment', 'y': 'Number of Calls'},
                        color=sentiment_counts.index,
                        color_discrete_map={
                            'POSITIVE': 'green',
                            'NEGATIVE': 'red',
                            'NEUTRAL': 'blue'
                        }
                    )
                    st.plotly_chart(fig_bar)
                with col2:
                    st.subheader("Most Mentioned Products")
                    fig_products = px.pie(
                        values=product_mentions_df['Count'],
                        names=product_mentions_df['Product'],
                        title='Most Mentioned Products'
                    )
                    st.plotly_chart(fig_products)
                st.subheader("All Calls")
                display_data = data.copy()
                # Truncate long summaries for the overview table.
                display_data['Summary Preview'] = display_data['Summary'].str[:100] + '...'
                st.dataframe(display_data[['Call ID', 'Chunk', 'Sentiment', 'Summary Preview', 'Overall Sentiment']])
                # Drill-down: pick a single call and show its full details.
                unique_call_ids = data[data['Call ID'] != '']['Call ID'].unique()
                call_id = st.selectbox("Select a Call ID to view details:", unique_call_ids)
                call_details = data[data['Call ID'] == call_id]
                if not call_details.empty:
                    st.subheader("Detailed Call Information")
                    st.write(f"**Call ID:** {call_id}")
                    st.write(f"**Overall Sentiment:** {call_details.iloc[0]['Overall Sentiment']}")
                    st.subheader("Full Call Summary")
                    st.text_area("Summary:",
                        value=call_details.iloc[0]['Summary'],
                        height=200,
                        disabled=True)
                    st.subheader("Conversation Chunks")
                    for _, row in call_details.iterrows():
                        if pd.notna(row['Chunk']):
                            st.write(f"**Chunk:** {row['Chunk']}")
                            st.write(f"**Sentiment:** {row['Sentiment']}")
                            st.write("---")
                else:
                    st.error("No details available for the selected Call ID.")
        except Exception as e:
            # Broad catch keeps the dashboard from crashing the app, but it
            # also hides the NameError noted above.
            st.error(f"Error loading dashboard: {e}")
        st.markdown('</div>', unsafe_allow_html=True)
if __name__ == "__main__":
run_app()