import streamlit as st
import pyaudio
import json
from vosk import Model, KaldiRecognizer
from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer
from sentence_transformers import SentenceTransformer
import time
import pandas as pd
from dotenv import load_dotenv
import os
import numpy as np

# Load environment variables (e.g. VOSK_MODEL_PATH) from a local .env file.
load_dotenv()


def cosine_similarity(a, b):
    """Cosine similarity between two 1-D vectors."""
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))


class SalesAnalysisApp:
    def __init__(self):
        # Multilingual sentiment model served through the transformers pipeline.
        model_name = "tabularisai/multilingual-sentiment-analysis"
        model = AutoModelForSequenceClassification.from_pretrained(model_name)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.sentiment_analyzer = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)

        # Offline speech recognition with Vosk at a 16 kHz sample rate.
        vosk_model_path = os.getenv("VOSK_MODEL_PATH")
        if not vosk_model_path:
            raise ValueError("VOSK_MODEL_PATH is not set; add it to your .env file.")
        self.vosk_model = Model(vosk_model_path)
        self.recognizer = KaldiRecognizer(self.vosk_model, 16000)

        # Microphone input via PyAudio; the stream is opened when recording starts.
        self.audio = pyaudio.PyAudio()
        self.stream = None

        # Product catalogue (columns: Product, Description) and objection/response
        # pairs (columns: Objection, Response).
        self.product_data = pd.read_csv(r"C:\Users\shaik\Downloads\Sales Calls Transcriptions - Sheet2.csv")
        self.objection_data = pd.read_csv(r"C:\Users\shaik\Downloads\Sales Calls Transcriptions - Sheet3.csv")

        # Sentence embeddings for semantic matching of transcripts against the CSV data.
        self.sentence_model = SentenceTransformer('all-MiniLM-L6-v2')

    def get_recommendations(self, text):
        """Return the five products whose descriptions are most similar to the transcript."""
        text_embedding = self.sentence_model.encode([text])
        product_embeddings = self.sentence_model.encode(self.product_data['Description'].tolist())
        similarities = [cosine_similarity(text_embedding[0], prod_emb) for prod_emb in product_embeddings]
        top_indices = np.argsort(similarities)[-5:][::-1]
        return self.product_data.iloc[top_indices]['Product'].tolist()

    def get_objection_response(self, text):
        """Return the scripted response for the closest known objection, if it is similar enough."""
        text_embedding = self.sentence_model.encode([text])
        objection_embeddings = self.sentence_model.encode(self.objection_data['Objection'].tolist())
        similarities = [cosine_similarity(text_embedding[0], obj_emb) for obj_emb in objection_embeddings]
        max_similarity = max(similarities)
        if max_similarity > 0.5:
            top_idx = np.argmax(similarities)
            return self.objection_data.iloc[top_idx]['Response']
        return None
    def analyze_sentiment(self, text):
        """Map the model's five-class labels onto NEGATIVE / NEUTRAL / POSITIVE."""
        if not text.strip():
            return "NEUTRAL", 0.0
        result = self.sentiment_analyzer(text.strip().lower())[0]
        sentiment_map = {
            'Very Negative': "NEGATIVE",
            'Negative': "NEGATIVE",
            'Neutral': "NEUTRAL",
            'Positive': "POSITIVE",
            'Very Positive': "POSITIVE",
        }
        return sentiment_map.get(result['label'], "NEUTRAL"), result['score']

    def run_app(self):
        st.title("Real-time Sales Call Analysis")

        if st.button("Start Recording"):
            # 16 kHz mono stream to match the rate the Vosk recognizer was created with.
            self.stream = self.audio.open(
                format=pyaudio.paInt16,
                channels=1,
                rate=16000,
                input=True,
                frames_per_buffer=4000,
            )

            transcript_placeholder = st.empty()
            sentiment_placeholder = st.empty()
            recommendations_placeholder = st.empty()
            objections_placeholder = st.empty()

            try:
                while True:
                    data = self.stream.read(4000, exception_on_overflow=False)
                    if self.recognizer.AcceptWaveform(data):
                        result = json.loads(self.recognizer.Result())
                        text = result["text"]
                        if text:
                            transcript_placeholder.write(f"Transcription: {text}")

                            sentiment, score = self.analyze_sentiment(text)
                            sentiment_placeholder.write(f"Sentiment: {sentiment} (Score: {score:.2f})")

                            recommendations = self.get_recommendations(text)
                            if recommendations:
                                # Write the list as a single block: repeated writes to the same
                                # placeholder would overwrite each item with the next one.
                                rec_lines = "\n".join(f"- {rec}" for rec in recommendations)
                                recommendations_placeholder.write(f"Product Recommendations:\n{rec_lines}")

                            objection_response = self.get_objection_response(text)
                            if objection_response:
                                objections_placeholder.write(f"Suggested Response: {objection_response}")
                    time.sleep(0.1)
            except Exception as e:
                st.error(f"Error: {str(e)}")
            finally:
                if self.stream:
                    self.stream.stop_stream()
                    self.stream.close()


if __name__ == "__main__":
    app = SalesAnalysisApp()
    app.run_app()
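
# Usage note (a minimal sketch; the file name "app.py" and the model path below are
# illustrative assumptions, not part of the original script):
#
#   # .env
#   VOSK_MODEL_PATH=/path/to/your/vosk-model
#
#   streamlit run app.py
#
# Point VOSK_MODEL_PATH at whichever Vosk acoustic model directory you have downloaded.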