awacke1 committed on
Commit 47d3d04
1 Parent(s): 630aa9e

Update app.py

Files changed (1)
  1. app.py +86 -117
app.py CHANGED
@@ -58,9 +58,8 @@ st.set_page_config(
 def get_cookie_manager():
     """Create and return a cookie manager."""
     return stx.CookieManager()
-
 def create_speech_component():
-    """Create speech recognition component with cookie-based storage."""
+    """Create speech recognition component with timer-based updates."""
 
     speech_recognition_html = """
     <!DOCTYPE html>
@@ -95,6 +94,10 @@ def create_speech_component():
             max-height: 400px;
             overflow-y: auto;
         }
+        .listening {
+            color: green;
+            font-weight: bold;
+        }
     </style>
     </head>
     <body>
@@ -105,16 +108,25 @@ def create_speech_component():
         </div>
         <div id="status">Ready</div>
         <div id="output"></div>
+        <div id="debug"></div>
 
         <script>
-            function setCookie(name, value, days) {
-                let expires = "";
-                if (days) {
-                    const date = new Date();
-                    date.setTime(date.getTime() + (days * 24 * 60 * 60 * 1000));
-                    expires = "; expires=" + date.toUTCString();
+            let lastTranscript = '';
+            let updateInterval;
+
+            function checkAndUpdateTranscript() {
+                const currentTranscript = document.getElementById('output').textContent;
+                if (currentTranscript !== lastTranscript) {
+                    lastTranscript = currentTranscript;
+                    window.parent.postMessage({
+                        type: 'streamlit:setComponentValue',
+                        data: currentTranscript
+                    }, '*');
+
+                    // Debug output
+                    document.getElementById('debug').textContent =
+                        'Last update: ' + new Date().toLocaleTimeString();
                 }
-                document.cookie = name + "=" + encodeURIComponent(value) + expires + "; path=/";
             }
 
             if (!('webkitSpeechRecognition' in window)) {
@@ -133,24 +145,33 @@ def create_speech_component():
 
             startButton.onclick = () => {
                 recognition.start();
-                status.textContent = 'Listening...';
+                status.textContent = '🎤 Listening...';
+                status.className = 'listening';
                 startButton.disabled = true;
                 stopButton.disabled = false;
+
+                // Start periodic updates
+                updateInterval = setInterval(checkAndUpdateTranscript, 500);
             };
 
             stopButton.onclick = () => {
                 recognition.stop();
                 status.textContent = 'Stopped';
+                status.className = '';
                 startButton.disabled = false;
                 stopButton.disabled = true;
-                // Save transcript to cookie when stopping
-                setCookie('voice_transcript', fullTranscript, 1);
+
+                // Stop periodic updates
+                clearInterval(updateInterval);
            };
 
             clearButton.onclick = () => {
                 fullTranscript = '';
                 output.textContent = '';
-                setCookie('voice_transcript', '', 1);
+                window.parent.postMessage({
+                    type: 'streamlit:setComponentValue',
+                    data: ''
+                }, '*');
             };
 
             recognition.onresult = (event) => {
@@ -162,8 +183,6 @@ def create_speech_component():
                     if (event.results[i].isFinal) {
                         finalTranscript += transcript + '\\n';
                         fullTranscript += transcript + '\\n';
-                        // Update cookie with latest transcript
-                        setCookie('voice_transcript', fullTranscript, 1);
                     } else {
                         interimTranscript += transcript;
                     }
@@ -191,24 +210,29 @@ def create_speech_component():
     </html>
     """
 
-    return components.html(speech_recognition_html, height=400)
+    # Return both the component value and height
+    return components.html(
+        speech_recognition_html,
+        height=400,
+    )
 
 def integrate_speech_component():
-    """Integrate speech component with cookie-based storage."""
-    # Initialize cookie manager
-    cookie_manager = get_cookie_manager()
+    """Integrate speech component with timer-based updates."""
+    if "voice_transcript" not in st.session_state:
+        st.session_state.voice_transcript = ""
 
-    # Create speech component
-    create_speech_component()
+    # Create the component and get its value
+    value = create_speech_component()
 
-    # Get transcript from cookie
-    transcript = cookie_manager.get('voice_transcript') or ""
+    # If we got a value from the component, update session state
+    if value is not None and value != "":
+        st.session_state.voice_transcript = value
+        # Debug output
+        st.write("Updated transcript:", value)
+
+    return st.session_state.voice_transcript
+
 
-    # Update session state
-    if transcript:
-        st.session_state.voice_transcript = transcript
-
-    return transcript
 
 
 # 2. 🚲BikeAI🏆 Load environment variables and initialize clients
@@ -1037,108 +1061,53 @@ def main():
         # Get transcript from the speech component
         current_transcript = integrate_speech_component()
 
-        # Display the transcript in a Streamlit text area
-        st.markdown("### Processed Voice Input:")
-        st.text_area("Voice Transcript", current_transcript, height=100)
+        # Display the transcript with live updates
+        transcript_placeholder = st.empty()
+        transcript_placeholder.text_area(
+            "Voice Transcript (Live)",
+            value=current_transcript,
+            height=100
+        )
+
+        # Add a status indicator
+        status_placeholder = st.empty()
+        if current_transcript != st.session_state.get('last_transcript', ''):
+            status_placeholder.success("🔴 Recording in progress...")
+            st.session_state.last_transcript = current_transcript
 
         # Process buttons
         col1, col2, col3 = st.columns(3)
+
         with col1:
             if st.button("Process with GPT"):
-                st.subheader("GPT-4o Response")
-                gpt_response = process_with_gpt(current_transcript)
-                st.markdown(gpt_response)
+                if current_transcript:
+                    with st.spinner("Processing with GPT..."):
+                        gpt_response = process_with_gpt(current_transcript)
+                        st.markdown(gpt_response)
 
         with col2:
             if st.button("Process with Claude"):
-                st.subheader("Claude Response")
-                claude_response = process_with_claude(current_transcript)
-                st.markdown(claude_response)
+                if current_transcript:
+                    with st.spinner("Processing with Claude..."):
+                        claude_response = process_with_claude(current_transcript)
+                        st.markdown(claude_response)
 
         with col3:
             if st.button("Search ArXiv"):
-                st.subheader("ArXiv Search Results")
-                arxiv_results = perform_ai_lookup(current_transcript)
-                st.markdown(arxiv_results)
-
-
-        # Display last voice input
-        if st.session_state.last_voice_input:
-            st.text_area("Last Voice Input:", st.session_state.last_voice_input, height=100)
-
-
-    if tab_main == "💬 Chat":
-        # Model Selection
-        model_choice = st.sidebar.radio(
-            "Choose AI Model:",
-            ["GPT-4o", "Claude-3", "GPT+Claude+Arxiv"]
-        )
+                if current_transcript:
+                    with st.spinner("Searching ArXiv..."):
+                        arxiv_results = perform_ai_lookup(current_transcript)
+                        st.markdown(arxiv_results)
 
-        # Chat Interface
-        user_input = st.text_area("Message:", height=100)
+    elif tab_main == "📝 File Editor":
+        if hasattr(st.session_state, 'current_file'):
+            st.subheader(f"Editing: {st.session_state.current_file}")
+            new_content = st.text_area("Content:", st.session_state.file_content, height=300)
+            if st.button("Save Changes"):
+                with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
+                    file.write(new_content)
+                st.success("File updated successfully!")
 
-        if st.button("Send 📨"):
-            if user_input:
-                if model_choice == "GPT-4o":
-                    gpt_response = process_with_gpt(user_input)
-                elif model_choice == "Claude-3":
-                    claude_response = process_with_claude(user_input)
-                else: # Both
-                    col1, col2, col3 = st.columns(3)
-                    with col2:
-                        st.subheader("Claude-3.5 Sonnet:")
-                        try:
-                            claude_response = process_with_claude(user_input)
-                        except:
-                            st.write('Claude 3.5 Sonnet out of tokens.')
-                    with col1:
-                        st.subheader("GPT-4o Omni:")
-                        try:
-                            gpt_response = process_with_gpt(user_input)
-                        except:
-                            st.write('GPT 4o out of tokens')
-                    with col3:
-                        st.subheader("Arxiv and Mistral Research:")
-                        with st.spinner("Searching ArXiv..."):
-                            #results = search_arxiv(user_input)
-                            results = perform_ai_lookup(user_input)
-
-                            st.markdown(results)
-
-        # Display Chat History
-        st.subheader("Chat History 📜")
-        tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])
-
-        with tab1:
-            for chat in st.session_state.chat_history:
-                st.text_area("You:", chat["user"], height=100)
-                st.text_area("Claude:", chat["claude"], height=200)
-                st.markdown(chat["claude"])
-
-        with tab2:
-            for message in st.session_state.messages:
-                with st.chat_message(message["role"]):
-                    st.markdown(message["content"])
-
-    elif tab_main == "📸 Media Gallery":
-        create_media_gallery()
-
-    elif tab_main == "🔍 Search ArXiv":
-        query = st.text_input("Enter your research query:")
-        if query:
-            with st.spinner("Searching ArXiv..."):
-                results = search_arxiv(query)
-                st.markdown(results)
-
-    elif tab_main == "📝 File Editor":
-        if hasattr(st.session_state, 'current_file'):
-            st.subheader(f"Editing: {st.session_state.current_file}")
-            new_content = st.text_area("Content:", st.session_state.file_content, height=300)
-            if st.button("Save Changes"):
-                with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
-                    file.write(new_content)
-                st.success("File updated successfully!")
-
 
     # Always show file manager in sidebar
     display_file_manager()
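
For orientation, here is a minimal, self-contained sketch of the timer-based pattern the new JavaScript relies on: the embedded page posts a streamlit:setComponentValue message back to the Streamlit host every 500 ms, and the Python side keeps the last non-empty value in st.session_state. The snippet is illustrative only; TIMER_HTML is a placeholder page rather than the app's real speech markup, and whether components.html actually returns the posted value depends on the Streamlit version, which is why both this sketch and the app's integrate_speech_component() guard against a None result.

# Minimal sketch of the postMessage pattern adopted in app.py above.
# Assumptions: TIMER_HTML is a placeholder, not the app's speech markup;
# components.html may return None on Streamlit versions that ignore the
# posted value, so the code guards for that.
import streamlit as st
import streamlit.components.v1 as components

TIMER_HTML = """
<div id="output">text produced inside the iframe</div>
<script>
  // Mirror checkAndUpdateTranscript(): every 500 ms, post the current
  // text back to the Streamlit host as the component's value.
  setInterval(() => {
    window.parent.postMessage({
      type: 'streamlit:setComponentValue',
      data: document.getElementById('output').textContent
    }, '*');
  }, 500);
</script>
"""

if "voice_transcript" not in st.session_state:
    st.session_state.voice_transcript = ""

value = components.html(TIMER_HTML, height=120)
if value:  # falls through silently when the host returns None
    st.session_state.voice_transcript = value

st.text_area("Received value", st.session_state.voice_transcript, height=80)

The 500 ms interval mirrors the one started in startButton.onclick above; compared with the old cookie approach, which only persisted the transcript when Stop was pressed, this keeps the Python side close to live.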