awacke1 committed on
Commit
ad2029b
•
1 Parent(s): 1025dd3

Update app.py

Files changed (1)
  1. app.py +294 -185
app.py CHANGED
@@ -1,5 +1,4 @@
  import streamlit as st
- import streamlit.components.v1 as components
  import anthropic
  import openai
  import base64
@@ -14,6 +13,7 @@ import pytz
  import random
  import re
  import requests
+ import streamlit.components.v1 as components
  import textract
  import time
  import zipfile
@@ -31,7 +31,6 @@ from xml.etree import ElementTree as ET
  from openai import OpenAI
  import extra_streamlit_components as stx
  from streamlit.runtime.scriptrunner import get_script_run_ctx
- import extra_streamlit_components as stx


  # 1. 🚲BikeAI🏆 Configuration and Setup
@@ -53,146 +52,6 @@ st.set_page_config(
  }
  )

-
-
- def create_speech_component():
-     """Create speech recognition component using postMessage for communication."""
-
-     speech_recognition_html = """
-     <div style="padding: 20px;">
-         <div class="controls">
-             <button id="start">Start Listening</button>
-             <button id="stop" disabled>Stop Listening</button>
-             <button id="clear">Clear Text</button>
-         </div>
-         <div id="status" style="margin: 10px 0; padding: 10px; background: #e8f5e9;">Ready</div>
-         <div id="output" style="white-space: pre-wrap; padding: 15px; background: #f5f5f5; min-height: 100px; max-height: 400px; overflow-y: auto;"></div>
-         <div id="debug" style="margin-top: 10px; color: #666;"></div>
-
-         <script>
-             let currentTranscript = '';
-             const debug = document.getElementById('debug');
-
-             function sendTranscriptUpdate() {
-                 // Send transcript to parent (Streamlit)
-                 window.parent.postMessage({
-                     type: 'transcript_update',
-                     data: currentTranscript
-                 }, '*');
-                 debug.textContent = `Last update: ${new Date().toLocaleTimeString()} - Length: ${currentTranscript.length}`;
-             }
-
-             // Set up periodic updates
-             setInterval(sendTranscriptUpdate, 3000); // Send update every 3 seconds
-
-             const recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
-             const startButton = document.getElementById('start');
-             const stopButton = document.getElementById('stop');
-             const clearButton = document.getElementById('clear');
-             const status = document.getElementById('status');
-             const output = document.getElementById('output');
-
-             recognition.continuous = true;
-             recognition.interimResults = true;
-
-             startButton.onclick = () => {
-                 recognition.start();
-                 status.textContent = '🎤 Listening...';
-                 startButton.disabled = true;
-                 stopButton.disabled = false;
-             };
-
-             stopButton.onclick = () => {
-                 recognition.stop();
-                 status.textContent = 'Stopped';
-                 startButton.disabled = false;
-                 stopButton.disabled = true;
-                 sendTranscriptUpdate(); // Send final update when stopped
-             };
-
-             clearButton.onclick = () => {
-                 currentTranscript = '';
-                 output.textContent = '';
-                 sendTranscriptUpdate(); // Send empty transcript
-             };
-
-             recognition.onresult = (event) => {
-                 let interimTranscript = '';
-                 let finalTranscript = '';
-
-                 for (let i = event.resultIndex; i < event.results.length; i++) {
-                     const transcript = event.results[i][0].transcript;
-                     if (event.results[i].isFinal) {
-                         finalTranscript += transcript + ' ';
-                         currentTranscript += transcript + ' ';
-                     } else {
-                         interimTranscript += transcript;
-                     }
-                 }
-
-                 output.textContent = currentTranscript + (interimTranscript ? '... ' + interimTranscript : '');
-                 output.scrollTop = output.scrollHeight;
-
-                 if (finalTranscript) {
-                     sendTranscriptUpdate(); // Send update when we have final transcript
-                 }
-             };
-
-             recognition.onend = () => {
-                 if (!stopButton.disabled) {
-                     recognition.start();
-                 }
-             };
-
-             // Auto-start on load
-             window.addEventListener('load', () => {
-                 setTimeout(() => startButton.click(), 1000);
-             });
-         </script>
-     </div>
-     """
-
-     # Return the rendered component
-     return components.html(
-         speech_recognition_html,
-         height=400,
-     )
-
- def integrate_speech_component():
-     """Integrate speech component with session state management."""
-     if "voice_transcript" not in st.session_state:
-         st.session_state.voice_transcript = ""
-     if "last_update" not in st.session_state:
-         st.session_state.last_update = time.time()
-
-     # Create placeholders for display
-     transcript_container = st.empty()
-     status_container = st.empty()
-
-     # Create component
-     component_val = create_speech_component()
-
-     # Display current transcript
-     current_transcript = st.session_state.voice_transcript
-     transcript_container.text_area(
-         "Voice Transcript:",
-         value=current_transcript,
-         height=100,
-         key=f"transcript_display_{int(time.time())}"
-     )
-
-     # Show status
-     status_container.text(
-         f"Last updated: {datetime.fromtimestamp(st.session_state.last_update).strftime('%H:%M:%S')}"
-     )
-
-     return current_transcript
-
-
-
-
-
-
  # 2. 🚲BikeAI🏆 Load environment variables and initialize clients
  load_dotenv()

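Note on the removal above: the deleted component relied on window.parent.postMessage, but components.html renders static HTML in an iframe and always returns None to Python, so those messages never reached st.session_state. A minimal sketch of the bidirectional alternative, assuming a separately built frontend — the component name and build path below are hypothetical, not part of this commit:

    import streamlit as st
    import streamlit.components.v1 as components

    # declare_component() returns a callable; its return value is whatever the
    # frontend reports via Streamlit.setComponentValue(). Plain components.html()
    # has no such return channel.
    speech_component = components.declare_component(
        "speech_to_text",          # hypothetical component name
        path="./frontend/build",   # hypothetical built-frontend directory
    )

    def get_transcript() -> str:
        # Each Streamlit rerun receives the latest value the frontend reported.
        return speech_component(default="") or ""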
@@ -421,6 +280,122 @@ def process_video_with_gpt(video_input, user_prompt):
  )
  return response.choices[0].message.content

+
+
+ def process_tts(text, voice="alloy"):
+     """
+     Process text-to-speech using OpenAI's TTS API
+     Voices available: alloy, echo, fable, onyx, nova, shimmer
+     """
+     try:
+         response = openai_client.audio.speech.create(
+             model="tts-1",
+             voice=voice,
+             input=text
+         )
+
+         # Generate a unique filename
+         filename = generate_filename("tts_output", "mp3")
+
+         # Save the audio file
+         response.stream_to_file(filename)
+
+         # Create audio player HTML
+         audio_html = f"""
+             <audio controls>
+                 <source src="data:audio/mp3;base64,{base64.b64encode(open(filename, 'rb').read()).decode()}" type="audio/mp3">
+                 Your browser does not support the audio element.
+             </audio>
+         """
+
+         return filename, audio_html
+     except Exception as e:
+         st.error(f"TTS Error: {str(e)}")
+         return None, None
+
+ def update_chat_interface():
+     """Update the chat interface to include voice selection and TTS playback"""
+     # Add voice selection to sidebar
+     st.sidebar.markdown("### 🗣️ TTS Voice Settings")
+     selected_voice = st.sidebar.selectbox(
+         "Choose TTS Voice:",
+         ["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
+         help="Select the voice for text-to-speech playback"
+     )
+
+     # Store the selected voice in session state
+     if "selected_voice" not in st.session_state:
+         st.session_state.selected_voice = selected_voice
+
+ # Modify the chat processing functions to include TTS
+ def process_with_gpt(text_input):
+     """Process text with GPT-4o and add TTS."""
+     if text_input:
+         st.session_state.messages.append({"role": "user", "content": text_input})
+
+         with st.chat_message("user"):
+             st.markdown(text_input)
+
+         with st.chat_message("assistant"):
+             completion = openai_client.chat.completions.create(
+                 model=st.session_state["openai_model"],
+                 messages=[
+                     {"role": m["role"], "content": m["content"]}
+                     for m in st.session_state.messages
+                 ],
+                 stream=False
+             )
+             return_text = completion.choices[0].message.content
+             st.write("GPT-4o: " + return_text)
+
+             # Add TTS playback
+             filename, audio_html = process_tts(return_text, st.session_state.selected_voice)
+             if audio_html:
+                 st.markdown(audio_html, unsafe_allow_html=True)
+
+             # Original file handling
+             filename = generate_filename("GPT-4o: " + return_text, "md")
+             create_file(filename, text_input, return_text)
+             st.session_state.messages.append({"role": "assistant", "content": return_text})
+         return return_text
+
+ def process_with_claude(text_input):
+     """Process text with Claude and add TTS."""
+     if text_input:
+         with st.chat_message("user"):
+             st.markdown(text_input)
+
+         with st.chat_message("assistant"):
+             response = claude_client.messages.create(
+                 model="claude-3-sonnet-20240229",
+                 max_tokens=1000,
+                 messages=[
+                     {"role": "user", "content": text_input}
+                 ]
+             )
+             response_text = response.content[0].text
+             st.write("Claude: " + response_text)
+
+             # Add TTS playback
+             filename, audio_html = process_tts(response_text, st.session_state.selected_voice)
+             if audio_html:
+                 st.markdown(audio_html, unsafe_allow_html=True)
+
+             # Original file handling
+             filename = generate_filename("Claude: " + response_text, "md")
+             create_file(filename, text_input, response_text)
+
+             st.session_state.chat_history.append({
+                 "user": text_input,
+                 "claude": response_text
+             })
+         return response_text
+
+
+

  def extract_urls(text):
      try:
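A minimal call site for the new process_tts helper, assuming openai_client, generate_filename, and st are initialized earlier in app.py as this diff implies; the text and voice choice are illustrative only. (Newer openai-python releases mark response.stream_to_file as deprecated in favor of with_streaming_response, so the commit's usage may warn on upgrade.)

    # Hedged usage sketch; nothing below is part of this commit.
    text = "Your ride summary is ready."
    filename, audio_html = process_tts(text, voice="nova")
    if audio_html:
        st.markdown(audio_html, unsafe_allow_html=True)  # inline <audio> player
        st.caption(f"Saved TTS audio to {filename}")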
@@ -776,11 +751,9 @@ speech_recognition_html = """
          const output = document.getElementById('output');
          let fullTranscript = '';
          let lastUpdateTime = Date.now();
-
          // Configure recognition
          recognition.continuous = true;
          recognition.interimResults = true;
-
          // Function to start recognition
          const startRecognition = () => {
              try {
@@ -793,21 +766,17 @@ speech_recognition_html = """
                  status.textContent = 'Error: ' + e.message;
              }
          };
-
          // Auto-start on load
          window.addEventListener('load', () => {
              setTimeout(startRecognition, 1000);
          });
-
          startButton.onclick = startRecognition;
-
          stopButton.onclick = () => {
              recognition.stop();
              status.textContent = 'Stopped';
              startButton.disabled = false;
              stopButton.disabled = true;
          };
-
          clearButton.onclick = () => {
              fullTranscript = '';
              output.textContent = '';
@@ -815,11 +784,9 @@ speech_recognition_html = """
                  type: 'clear_transcript',
              }, '*');
          };
-
          recognition.onresult = (event) => {
              let interimTranscript = '';
              let finalTranscript = '';
-
              for (let i = event.resultIndex; i < event.results.length; i++) {
                  const transcript = event.results[i][0].transcript;
                  if (event.results[i].isFinal) {
@@ -828,7 +795,6 @@ speech_recognition_html = """
                      interimTranscript += transcript;
                  }
              }
-
              if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
                  if (finalTranscript) {
                      fullTranscript += finalTranscript;
@@ -838,14 +804,10 @@ speech_recognition_html = """
                  }
                  lastUpdateTime = Date.now();
              }
-
              output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
              output.scrollTop = output.scrollHeight;
-
              document.getElementById('streamlit-data').value = fullTranscript;
-
          };
-
          recognition.onend = () => {
              if (!stopButton.disabled) {
                  try {
@@ -859,7 +821,6 @@ speech_recognition_html = """
                  }
              }
          };
-
          recognition.onerror = (event) => {
              console.error('Recognition error:', event.error);
              status.textContent = 'Error: ' + event.error;
@@ -1004,48 +965,196 @@ def get_media_html(media_path, media_type="video", width="100%"):
  def set_transcript(text):
      """Set transcript in session state."""
      st.session_state.voice_transcript = text
+
  def main():
      st.sidebar.markdown("### 🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI")

+     # Main navigation
      tab_main = st.radio("Choose Action:",
-                         ["🎤 Voice Input", "💬 Chat", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
-                         horizontal=True)
+                         ["🎤 Voice Input", "💬 Chat", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
+                         horizontal=True)

      if tab_main == "🎤 Voice Input":
          st.subheader("Voice Recognition")

-         try:
-             # Initialize speech component
-             current_transcript = integrate_speech_component()
+         # Initialize session state for the transcript
+         if 'voice_transcript' not in st.session_state:
+             st.session_state.voice_transcript = ""
+
+         # Display speech recognition component and capture returned value
+         transcript = st.components.v1.html(speech_recognition_html, height=400)
+
+         # Update session state if there's new data
+         if transcript is not None and transcript != "":
+             st.session_state.voice_transcript = transcript
+
+         # Display the transcript in a Streamlit text area
+         st.markdown("### Processed Voice Input:")
+         st.text_area("Voice Transcript", st.session_state.voice_transcript, height=100)
+
+         # Add functionality to process the transcript
+         if st.button("Process Transcript"):
+             st.subheader("AI Response to Transcript")
+             gpt_response = process_with_gpt(st.session_state.voice_transcript)
+             st.markdown(gpt_response)
+
+         # Option to clear the transcript
+         if st.button("Clear Transcript"):
+             st.session_state.voice_transcript = ""
+             st.rerun()

-             # Show last update time
-             st.text(f"Last updated: {datetime.fromtimestamp(st.session_state.last_update).strftime('%H:%M:%S')}")
+         # Buttons to process the transcript
+         if st.button("Search with GPT"):
+             st.subheader("GPT-4o Response")
+             gpt_response = process_with_gpt(st.session_state.voice_transcript)
+             st.markdown(gpt_response)
+
+         if st.button("Search with Claude"):
+             st.subheader("Claude Response")
+             claude_response = process_with_claude(st.session_state.voice_transcript)
+             st.markdown(claude_response)
+
+         if st.button("Search ArXiv"):
+             st.subheader("ArXiv Search Results")
+             arxiv_results = perform_ai_lookup(st.session_state.voice_transcript)
+             st.markdown(arxiv_results)

-             # Process buttons if we have a transcript
-             if current_transcript:
-                 col1, col2, col3 = st.columns(3)
-
-                 with col1:
-                     if st.button("Process with GPT"):
-                         with st.spinner("Processing with GPT..."):
-                             response = process_with_gpt(current_transcript)
-                             st.markdown(response)
-
-                 with col2:
-                     if st.button("Process with Claude"):
-                         with st.spinner("Processing with Claude..."):
-                             response = process_with_claude(current_transcript)
-                             st.markdown(response)
-
-                 with col3:
-                     if st.button("Search ArXiv"):
+         # Display last voice input
+         if st.session_state.last_voice_input:
+             st.text_area("Last Voice Input:", st.session_state.last_voice_input, height=100)
+
+         # Model Selection
+         model_choice = st.sidebar.radio(
+             "Choose AI Model:",
+             ["GPT-4o", "Claude-3", "GPT+Claude+Arxiv"]
+         )
+
+         # Chat Interface
+         user_input = st.text_area("Message:", height=100)
+
+         if st.button("Send 📨"):
+             if user_input:
+                 if model_choice == "GPT-4o":
+                     gpt_response = process_with_gpt(user_input)
+                 elif model_choice == "Claude-3":
+                     claude_response = process_with_claude(user_input)
+                 else: # Both
+                     col1, col2, col3 = st.columns(3)
+                     with col2:
+                         st.subheader("Claude-3.5 Sonnet:")
+                         try:
+                             claude_response = process_with_claude(user_input)
+                         except:
+                             st.write('Claude 3.5 Sonnet out of tokens.')
+                     with col1:
+                         st.subheader("GPT-4o Omni:")
+                         try:
+                             gpt_response = process_with_gpt(user_input)
+                         except:
+                             st.write('GPT 4o out of tokens')
+                     with col3:
+                         st.subheader("Arxiv and Mistral Research:")
                          with st.spinner("Searching ArXiv..."):
-                             results = perform_ai_lookup(current_transcript)
+                             #results = search_arxiv(user_input)
+                             results = perform_ai_lookup(user_input)
+
                              st.markdown(results)
+
+         # Display Chat History
+         st.subheader("Chat History 📜")
+         tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])

-         except Exception as e:
-             st.error(f"Error in voice input: {str(e)}")
-
+         with tab1:
+             for chat in st.session_state.chat_history:
+                 st.text_area("You:", chat["user"], height=100)
+                 st.text_area("Claude:", chat["claude"], height=200)
+                 st.markdown(chat["claude"])
+
+         with tab2:
+             for message in st.session_state.messages:
+                 with st.chat_message(message["role"]):
+                     st.markdown(message["content"])
+
+
+     # ------------------------------------------------------- ************************* --->
+
+
+     if tab_main == "💬 Chat":
+         # Model Selection
+         model_choice = st.sidebar.radio(
+             "Choose AI Model:",
+             ["GPT-4o", "Claude-3", "GPT+Claude+Arxiv"]
+         )
+
+         # Chat Interface
+         user_input = st.text_area("Message:", height=100)
+
+         if st.button("Send 📨"):
+             if user_input:
+                 if model_choice == "GPT-4o":
+                     gpt_response = process_with_gpt(user_input)
+                 elif model_choice == "Claude-3":
+                     claude_response = process_with_claude(user_input)
+                 else: # Both
+                     col1, col2, col3 = st.columns(3)
+                     with col2:
+                         st.subheader("Claude-3.5 Sonnet:")
+                         try:
+                             claude_response = process_with_claude(user_input)
+                         except:
+                             st.write('Claude 3.5 Sonnet out of tokens.')
+                     with col1:
+                         st.subheader("GPT-4o Omni:")
+                         try:
+                             gpt_response = process_with_gpt(user_input)
+                         except:
+                             st.write('GPT 4o out of tokens')
+                     with col3:
+                         st.subheader("Arxiv and Mistral Research:")
+                         with st.spinner("Searching ArXiv..."):
+                             #results = search_arxiv(user_input)
+                             results = perform_ai_lookup(user_input)
+
+                             st.markdown(results)
+
+         # Display Chat History
+         st.subheader("Chat History 📜")
+         tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])
+
+         with tab1:
+             for chat in st.session_state.chat_history:
+                 st.text_area("You:", chat["user"], height=100)
+                 st.text_area("Claude:", chat["claude"], height=200)
+                 st.markdown(chat["claude"])
+
+         with tab2:
+             for message in st.session_state.messages:
+                 with st.chat_message(message["role"]):
+                     st.markdown(message["content"])
+
+     elif tab_main == "📸 Media Gallery":
+         create_media_gallery()
+
+     elif tab_main == "🔍 Search ArXiv":
+         query = st.text_input("Enter your research query:")
+         if query:
+             with st.spinner("Searching ArXiv..."):
+                 results = search_arxiv(query)
+                 st.markdown(results)
+
+     elif tab_main == "📝 File Editor":
+         if hasattr(st.session_state, 'current_file'):
+             st.subheader(f"Editing: {st.session_state.current_file}")
+             new_content = st.text_area("Content:", st.session_state.file_content, height=300)
+             if st.button("Save Changes"):
+                 with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
+                     file.write(new_content)
+                 st.success("File updated successfully!")
+

      # Always show file manager in sidebar
      display_file_manager()
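Two caveats on the reworked voice tab. First, st.components.v1.html renders static HTML and returns None, so with this HTML block the `if transcript is not None and transcript != ""` branch never runs; a declared bidirectional component (see the sketch after the first hunk) would be needed for the return value to carry data. Second, st.session_state.last_voice_input, chat_history, and messages are read in this hunk but not initialized here. Assuming no initialization exists elsewhere in app.py (not visible in this diff), a guard such as the following avoids attribute errors on first render:

    # Hypothetical guard; keys and defaults inferred from the reads in main().
    defaults = {
        "voice_transcript": "",
        "last_voice_input": "",
        "chat_history": [],
        "messages": [],
    }
    for key, value in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = value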