Zasha1 committed on
Commit 2a778a6 · verified · 1 parent: 710c7ab

Update app.py

Files changed (1)
app.py +31 -120
app.py CHANGED
@@ -1,4 +1,4 @@
- from streamlit_webrtc import webrtc_streamer, WebRtcMode
+ from streamlit_webrtc import webrtc_streamer, WebRtcMode, AudioProcessorBase
  from sentiment_analysis import analyze_sentiment, transcribe_with_chunks
  from product_recommender import ProductRecommender
  from objection_handler import ObjectionHandler
@@ -11,9 +11,6 @@ import pandas as pd
  import plotly.express as px
  import streamlit as st
  import numpy as np
- from io import BytesIO
- import wave
- import threading
  import queue

  # Initialize components
@@ -25,94 +22,20 @@ model = SentenceTransformer('all-MiniLM-L6-v2')
  transcription_queue = queue.Queue()

  def generate_comprehensive_summary(chunks):
-     """
-     Generate a comprehensive summary from conversation chunks
-     """
-     # Extract full text from chunks
-     full_text = " ".join([chunk[0] for chunk in chunks])
-
-     # Perform basic analysis
-     total_chunks = len(chunks)
-     sentiments = [chunk[1] for chunk in chunks]
-
-     # Determine overall conversation context
-     context_keywords = {
-         'product_inquiry': ['dress', 'product', 'price', 'stock'],
-         'pricing': ['cost', 'price', 'budget'],
-         'negotiation': ['installment', 'payment', 'manage']
-     }
-
-     # Detect conversation themes
-     themes = []
-     for keyword_type, keywords in context_keywords.items():
-         if any(keyword.lower() in full_text.lower() for keyword in keywords):
-             themes.append(keyword_type)
-
-     # Basic sentiment analysis
-     positive_count = sentiments.count('POSITIVE')
-     negative_count = sentiments.count('NEGATIVE')
-     neutral_count = sentiments.count('NEUTRAL')
-
-     # Key interaction highlights
-     key_interactions = []
-     for chunk in chunks:
-         if any(keyword.lower() in chunk[0].lower() for keyword in ['price', 'dress', 'stock', 'installment']):
-             key_interactions.append(chunk[0])
-
-     # Construct summary
-     summary = f"Conversation Summary:\n"
-
-     # Context and themes
-     if 'product_inquiry' in themes:
-         summary += "• Customer initiated a product inquiry about items.\n"
-
-     if 'pricing' in themes:
-         summary += "• Price and budget considerations were discussed.\n"
-
-     if 'negotiation' in themes:
-         summary += "• Customer and seller explored flexible payment options.\n"
-
-     # Sentiment insights
-     summary += f"\nConversation Sentiment:\n"
-     summary += f"• Positive Interactions: {positive_count}\n"
-     summary += f"• Negative Interactions: {negative_count}\n"
-     summary += f"• Neutral Interactions: {neutral_count}\n"
-
-     # Key highlights
-     summary += "\nKey Conversation Points:\n"
-     for interaction in key_interactions[:3]:  # Limit to top 3 key points
-         summary += f"• {interaction}\n"
-
-     # Conversation outcome
-     if positive_count > negative_count:
-         summary += "\nOutcome: Constructive and potentially successful interaction."
-     elif negative_count > positive_count:
-         summary += "\nOutcome: Interaction may require further follow-up."
-     else:
-         summary += "\nOutcome: Neutral interaction with potential for future engagement."
-
-     return summary
+     # Your existing function implementation
+     pass

  def is_valid_input(text):
-     text = text.strip().lower()
-     if len(text) < 3 or re.match(r'^[a-zA-Z\s]*$', text) is None:
-         return False
-     return True
+     # Your existing function implementation
+     pass

  def is_relevant_sentiment(sentiment_score):
-     return sentiment_score > 0.4
+     # Your existing function implementation
+     pass

  def calculate_overall_sentiment(sentiment_scores):
-     if sentiment_scores:
-         average_sentiment = sum(sentiment_scores) / len(sentiment_scores)
-         overall_sentiment = (
-             "POSITIVE" if average_sentiment > 0 else
-             "NEGATIVE" if average_sentiment < 0 else
-             "NEUTRAL"
-         )
-     else:
-         overall_sentiment = "NEUTRAL"
-     return overall_sentiment
+     # Your existing function implementation
+     pass

  def handle_objection(text):
      query_embedding = model.encode([text])
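Note: the four helpers above are reduced to placeholder stubs by this commit, so each returns None until reimplemented. Any caller that formats their output needs a guard in the meantime; a minimal sketch (the chunks variable and the Streamlit calls here are illustrative assumptions, not part of the committed file):

    summary = generate_comprehensive_summary(chunks)  # stub currently returns None
    if summary:
        st.write(summary)
    else:
        st.warning("Summary unavailable until the stub is restored.")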
@@ -122,52 +45,40 @@ def handle_objection(text):
          return "\n".join(responses) if responses else "No objection response found."
      return "No objection response found."

- def transcribe_audio(audio_bytes, sample_rate=16000):
-     """Transcribe audio using the transcribe_with_chunks function from sentiment_analysis.py."""
-     try:
-         # Save audio bytes to a temporary WAV file
-         with BytesIO() as wav_buffer:
-             with wave.open(wav_buffer, 'wb') as wf:
-                 wf.setnchannels(1)  # Mono audio
-                 wf.setsampwidth(2)  # 2 bytes for int16
-                 wf.setframerate(sample_rate)  # Sample rate
-                 wf.writeframes(audio_bytes)
+ class AudioProcessor(AudioProcessorBase):
+     def __init__(self):
+         self.sr = 16000  # Sample rate
+         self.q = transcription_queue
+
+     def recv(self, frame):
+         audio_data = frame.to_ndarray()
+         audio_bytes = (audio_data * 32767).astype(np.int16).tobytes()  # Convert to int16 format
+
+         # Transcribe the audio
+         text = self.transcribe_audio(audio_bytes)
+         if text:
+             self.q.put(text)  # Add transcribed text to the queue
+
+         return frame
+
+     def transcribe_audio(self, audio_bytes):
+         try:
              # Use the transcribe_with_chunks function from sentiment_analysis.py
              chunks = transcribe_with_chunks({})  # Pass an empty objections_dict for now
              if chunks:
                  return chunks[-1][0]  # Return the latest transcribed text
-     except Exception as e:
-         print(f"Error transcribing audio: {e}")
-         return None
-
- def audio_processing_thread(audio_frame):
-     """Thread function to process audio frames."""
-     # Convert audio frame to bytes
-     audio_data = audio_frame.to_ndarray()
-     print(f"Audio data shape: {audio_data.shape}")  # Debug: Check audio data shape
-     print(f"Audio data sample: {audio_data[:10]}")  # Debug: Check first 10 samples
-
-     audio_bytes = (audio_data * 32767).astype(np.int16).tobytes()  # Convert to int16 format
-
-     # Transcribe the audio
-     text = transcribe_audio(audio_bytes)
-     if text:
-         transcription_queue.put(text)  # Add transcribed text to the queue
+         except Exception as e:
+             print(f"Error transcribing audio: {e}")
+             return None

  def real_time_analysis():
      st.info("Listening... Say 'stop' to end the process.")

-     def audio_frame_callback(audio_frame):
-         # Start a new thread to process the audio frame
-         threading.Thread(target=audio_processing_thread, args=(audio_frame,)).start()
-         return audio_frame
-
      # Start WebRTC audio stream
      webrtc_ctx = webrtc_streamer(
          key="real-time-audio",
          mode=WebRtcMode.SENDONLY,
-         audio_frame_callback=audio_frame_callback,
+         audio_processor_factory=AudioProcessor,
          media_stream_constraints={"audio": True, "video": False},
      )
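Note on recv(): av.AudioFrame.to_ndarray() commonly yields int16 samples already (frame format "s16"), in which case multiplying by 32767 overflows; the scaling is only correct for float samples in [-1, 1]. A defensive conversion, as a sketch under that assumption (the helper name is hypothetical, not in the committed file):

    import numpy as np

    def frame_to_int16_bytes(frame):
        samples = frame.to_ndarray()
        if np.issubdtype(samples.dtype, np.floating):
            samples = samples * 32767  # float PCM in [-1, 1] -> int16 range
        return samples.astype(np.int16).tobytes()

Otherwise the switch from a hand-rolled thread per frame to audio_processor_factory follows streamlit_webrtc's intended pattern: the library constructs AudioProcessor once and invokes recv() on its own worker thread for each incoming frame, which is why the threading import could be dropped.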
 
@@ -289,4 +200,4 @@ def run_app():
          st.error(f"Error loading dashboard: {e}")

  if __name__ == "__main__":
-     run_app()
+     run_app()
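For completeness, the queue that recv() fills is typically drained on the Streamlit side while the connection is up; webrtc_ctx.state.playing is streamlit_webrtc's flag for an active stream. A minimal consumer sketch (the display call is an assumption; the committed file's actual loop sits outside this diff):

    while webrtc_ctx.state.playing:
        try:
            text = transcription_queue.get(timeout=1.0)
        except queue.Empty:
            continue
        st.write(f"Transcribed: {text}")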
 