Update app.py
app.py (CHANGED)
@@ -140,43 +140,43 @@ def real_time_analysis():
(the previous body of audio_frame_callback, old lines 143-180, was replaced; the removed lines are not visible in this view)

     st.info("Listening... Say 'stop' to end the process.")

     def audio_frame_callback(audio_frame):
+        # Convert audio frame to bytes
+        audio_data = audio_frame.to_ndarray()
+        print(f"Audio data shape: {audio_data.shape}")  # Debug: Check audio data shape
+        print(f"Audio data sample: {audio_data[:10]}")  # Debug: Check first 10 samples

+        audio_bytes = (audio_data * 32767).astype(np.int16).tobytes()  # Convert to int16 format

+        # Transcribe the audio
+        text = transcribe_audio(audio_bytes)
+        if text:
+            st.write(f"*Recognized Text:* {text}")

+            # Analyze sentiment
+            sentiment, score = analyze_sentiment(text)
+            st.write(f"*Sentiment:* {sentiment} (Score: {score})")

+            # Handle objection
+            objection_response = handle_objection(text)
+            st.write(f"*Objection Response:* {objection_response}")

+            # Get product recommendation
+            recommendations = []
+            if is_valid_input(text) and is_relevant_sentiment(score):
+                query_embedding = model.encode([text])
+                distances, indices = product_recommender.index.search(query_embedding, 1)

+                if distances[0][0] < 1.5:  # Similarity threshold
+                    recommendations = product_recommender.get_recommendations(text)

+            if recommendations:
+                st.write("*Product Recommendations:*")
+                for rec in recommendations:
+                    st.write(rec)
+        else:
+            st.error("No transcription returned.")  # Debug: Check if transcription fails

+        return audio_frame

     # Start WebRTC audio stream
     webrtc_ctx = webrtc_streamer(
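The hunk is cut off in the middle of the webrtc_streamer call, so the remaining arguments are not shown here. For context only, a typical way to wire the callback into streamlit-webrtc looks roughly like the sketch below; the key, mode, and media constraints are illustrative assumptions and are not taken from this commit, and only audio_frame_callback=audio_frame_callback is implied by the code above.

    # Illustrative sketch only; the actual arguments are cut off in this hunk.
    from streamlit_webrtc import WebRtcMode, webrtc_streamer  # likely already imported in app.py

    webrtc_ctx = webrtc_streamer(
        key="real-time-analysis",                   # hypothetical key
        mode=WebRtcMode.SENDONLY,                   # browser sends audio only (assumption)
        audio_frame_callback=audio_frame_callback,  # register the callback defined above
        media_stream_constraints={"audio": True, "video": False},
    )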
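One note on the conversion inside the callback: (audio_data * 32767).astype(np.int16).tobytes() only makes sense if the frame arrives as floating-point samples normalized to [-1.0, 1.0]. A minimal standalone check of that conversion, under that assumption:

    import numpy as np

    # Assumes float samples in [-1.0, 1.0]; if the frame is already int16
    # (e.g. av format "s16"), this scaling would clip and should be skipped.
    audio_data = np.array([0.0, 0.5, -0.5, 1.0, -1.0], dtype=np.float32)
    audio_bytes = (audio_data * 32767).astype(np.int16).tobytes()
    print(np.frombuffer(audio_bytes, dtype=np.int16))  # [0 16383 -16383 32767 -32767]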
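The recommendation branch relies on a product_recommender object that exposes a FAISS index and a get_recommendations method, plus a sentence-embedding model; none of these are defined in this hunk. A minimal sketch of what such a recommender could look like, where the class name, fields, model name, and product list are assumptions for illustration:

    import faiss
    from sentence_transformers import SentenceTransformer

    class ProductRecommender:  # hypothetical implementation, not from this commit
        def __init__(self, model, products):
            self.model = model
            self.products = products
            embeddings = model.encode(products).astype("float32")
            self.index = faiss.IndexFlatL2(embeddings.shape[1])  # L2 distance, smaller = closer
            self.index.add(embeddings)

        def get_recommendations(self, text, k=3):
            query = self.model.encode([text]).astype("float32")
            distances, indices = self.index.search(query, k)
            return [self.products[i] for i in indices[0]]

    model = SentenceTransformer("all-MiniLM-L6-v2")  # assumed model name
    product_recommender = ProductRecommender(model, ["Plan A", "Plan B", "Plan C"])
    print(product_recommender.get_recommendations("I need a cheaper option", k=2))

With an L2-style index like this, smaller distances mean closer matches, which is consistent with the distances[0][0] < 1.5 threshold used in the callback above.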