awacke1 committed
Commit 630aa9e
1 Parent(s): 65a0e2e

Update app.py

Files changed (1): app.py (+32 −24)
app.py CHANGED
@@ -31,6 +31,7 @@ from xml.etree import ElementTree as ET
 from openai import OpenAI
 import extra_streamlit_components as stx
 from streamlit.runtime.scriptrunner import get_script_run_ctx
+import extra_streamlit_components as stx
 
 
 # 1. 🚲BikeAI🏆 Configuration and Setup
@@ -54,9 +55,12 @@ st.set_page_config(
 
 
 
+def get_cookie_manager():
+    """Create and return a cookie manager."""
+    return stx.CookieManager()
 
 def create_speech_component():
-    """Create a speech recognition component compatible with basic Streamlit HTML."""
+    """Create speech recognition component with cookie-based storage."""
 
     speech_recognition_html = """
     <!DOCTYPE html>
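For context, the new get_cookie_manager() helper wraps extra_streamlit_components' CookieManager, which is itself a Streamlit component that can read and write browser cookies from Python. A minimal, self-contained sketch of how such a manager is typically used (the cookie name and demo value here are illustrative assumptions, not part of this commit):

```python
import streamlit as st
import extra_streamlit_components as stx

# Illustrative only: a tiny page that round-trips a value through a browser cookie.
cookie_manager = stx.CookieManager()

# Read an existing cookie (returns None if the browser has not set it yet).
saved = cookie_manager.get("demo_cookie")
st.write("Current value:", saved)

# Write or overwrite the cookie from Python; the component syncs it to the browser.
if st.button("Save value"):
    cookie_manager.set("demo_cookie", "hello from Streamlit")
```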
@@ -91,9 +95,6 @@ def create_speech_component():
                max-height: 400px;
                overflow-y: auto;
            }
-            .controls {
-                margin: 10px 0;
-            }
        </style>
    </head>
    <body>
@@ -106,6 +107,16 @@ def create_speech_component():
        <div id="output"></div>
 
        <script>
+            function setCookie(name, value, days) {
+                let expires = "";
+                if (days) {
+                    const date = new Date();
+                    date.setTime(date.getTime() + (days * 24 * 60 * 60 * 1000));
+                    expires = "; expires=" + date.toUTCString();
+                }
+                document.cookie = name + "=" + encodeURIComponent(value) + expires + "; path=/";
+            }
+
            if (!('webkitSpeechRecognition' in window)) {
                alert('Speech recognition not supported');
            } else {
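One detail worth noting about the setCookie() helper above: it stores the transcript with encodeURIComponent, so spaces and newlines arrive percent-encoded in the cookie. Whether the Python-side cookie manager decodes this automatically depends on the library; if it does not, a decode step along these lines would recover the original text (read_transcript_cookie is a hypothetical helper, not part of this commit):

```python
from urllib.parse import unquote

def read_transcript_cookie(cookie_manager) -> str:
    """Hypothetical helper: read the raw cookie and undo percent-encoding if present."""
    raw = cookie_manager.get("voice_transcript") or ""
    # unquote() leaves text without %-escapes unchanged, so it is harmless
    # if the cookie manager has already decoded the value.
    return unquote(raw)
```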
@@ -132,17 +143,14 @@ def create_speech_component():
                    status.textContent = 'Stopped';
                    startButton.disabled = false;
                    stopButton.disabled = true;
+                    // Save transcript to cookie when stopping
+                    setCookie('voice_transcript', fullTranscript, 1);
                };
 
                clearButton.onclick = () => {
                    fullTranscript = '';
                    output.textContent = '';
-                    if (window.parent.document) {
-                        const event = new CustomEvent('transcript-update', {
-                            detail: { transcript: '' }
-                        });
-                        window.parent.document.dispatchEvent(event);
-                    }
+                    setCookie('voice_transcript', '', 1);
                };
 
                recognition.onresult = (event) => {
@@ -154,6 +162,8 @@ def create_speech_component():
                        if (event.results[i].isFinal) {
                            finalTranscript += transcript + '\\n';
                            fullTranscript += transcript + '\\n';
+                            // Update cookie with latest transcript
+                            setCookie('voice_transcript', fullTranscript, 1);
                        } else {
                            interimTranscript += transcript;
                        }
@@ -161,13 +171,6 @@ def create_speech_component():
 
                    output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
                    output.scrollTop = output.scrollHeight;
-
-                    // Update hidden input with the current transcript
-                    const hiddenInput = document.createElement('input');
-                    hiddenInput.type = 'hidden';
-                    hiddenInput.value = fullTranscript;
-                    hiddenInput.id = 'transcript-value';
-                    document.body.appendChild(hiddenInput);
                };
 
                recognition.onend = () => {
@@ -188,19 +191,24 @@ def create_speech_component():
    </html>
    """
 
-    # Create the component without a key
    return components.html(speech_recognition_html, height=400)
 
 def integrate_speech_component():
-    """Integrate the speech component into the main app."""
-    if "voice_transcript" not in st.session_state:
-        st.session_state.voice_transcript = ""
+    """Integrate speech component with cookie-based storage."""
+    # Initialize cookie manager
+    cookie_manager = get_cookie_manager()
 
-    # Get the transcript from the component
-    transcript = create_speech_component()
+    # Create speech component
+    create_speech_component()
 
-    return st.session_state.voice_transcript
+    # Get transcript from cookie
+    transcript = cookie_manager.get('voice_transcript') or ""
 
+    # Update session state
+    if transcript:
+        st.session_state.voice_transcript = transcript
+
+    return transcript
 
 
 # 2. 🚲BikeAI🏆 Load environment variables and initialize clients
 
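Taken together, the change moves the transcript hand-off from a hidden DOM input to a browser cookie: the embedded JavaScript writes document.cookie under the name voice_transcript, and on the next Streamlit rerun integrate_speech_component() reads it back through the CookieManager and mirrors it into st.session_state. A minimal sketch of how the function might be wired into the main page (the caller name, layout, and widget labels are assumptions, not part of this commit):

```python
import streamlit as st

def render_voice_input_tab():
    """Hypothetical caller: show the speech component and echo the captured transcript."""
    st.subheader("🎤 Voice input")

    # Make sure the session key exists before the first read.
    if "voice_transcript" not in st.session_state:
        st.session_state.voice_transcript = ""

    # Renders the HTML/JS component and returns the cookie-backed transcript.
    transcript = integrate_speech_component()

    st.text_area("Transcript", value=transcript, height=150)

    # The cookie is only re-read on a rerun, so offer a manual refresh.
    if st.button("🔄 Refresh transcript"):
        st.rerun()
```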