Update app.py
app.py CHANGED
@@ -1,5 +1,4 @@
 import streamlit as st
-import streamlit.components.v1 as components
 import anthropic
 import openai
 import base64
@@ -14,6 +13,7 @@ import pytz
 import random
 import re
 import requests
+import streamlit.components.v1 as components
 import textract
 import time
 import zipfile
@@ -31,7 +31,6 @@ from xml.etree import ElementTree as ET
 from openai import OpenAI
 import extra_streamlit_components as stx
 from streamlit.runtime.scriptrunner import get_script_run_ctx
-import extra_streamlit_components as stx
 
 
 # 1. 🚲BikeAI🏆 Configuration and Setup
@@ -53,146 +52,6 @@ st.set_page_config(
 }
 )
 
-
-
-def create_speech_component():
-    """Create speech recognition component using postMessage for communication."""
-
-    speech_recognition_html = """
-    <div style="padding: 20px;">
-        <div class="controls">
-            <button id="start">Start Listening</button>
-            <button id="stop" disabled>Stop Listening</button>
-            <button id="clear">Clear Text</button>
-        </div>
-        <div id="status" style="margin: 10px 0; padding: 10px; background: #e8f5e9;">Ready</div>
-        <div id="output" style="white-space: pre-wrap; padding: 15px; background: #f5f5f5; min-height: 100px; max-height: 400px; overflow-y: auto;"></div>
-        <div id="debug" style="margin-top: 10px; color: #666;"></div>
-
-        <script>
-            let currentTranscript = '';
-            const debug = document.getElementById('debug');
-
-            function sendTranscriptUpdate() {
-                // Send transcript to parent (Streamlit)
-                window.parent.postMessage({
-                    type: 'transcript_update',
-                    data: currentTranscript
-                }, '*');
-                debug.textContent = `Last update: ${new Date().toLocaleTimeString()} - Length: ${currentTranscript.length}`;
-            }
-
-            // Set up periodic updates
-            setInterval(sendTranscriptUpdate, 3000); // Send update every 3 seconds
-
-            const recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
-            const startButton = document.getElementById('start');
-            const stopButton = document.getElementById('stop');
-            const clearButton = document.getElementById('clear');
-            const status = document.getElementById('status');
-            const output = document.getElementById('output');
-
-            recognition.continuous = true;
-            recognition.interimResults = true;
-
-            startButton.onclick = () => {
-                recognition.start();
-                status.textContent = '🎤 Listening...';
-                startButton.disabled = true;
-                stopButton.disabled = false;
-            };
-
-            stopButton.onclick = () => {
-                recognition.stop();
-                status.textContent = 'Stopped';
-                startButton.disabled = false;
-                stopButton.disabled = true;
-                sendTranscriptUpdate(); // Send final update when stopped
-            };
-
-            clearButton.onclick = () => {
-                currentTranscript = '';
-                output.textContent = '';
-                sendTranscriptUpdate(); // Send empty transcript
-            };
-
-            recognition.onresult = (event) => {
-                let interimTranscript = '';
-                let finalTranscript = '';
-
-                for (let i = event.resultIndex; i < event.results.length; i++) {
-                    const transcript = event.results[i][0].transcript;
-                    if (event.results[i].isFinal) {
-                        finalTranscript += transcript + ' ';
-                        currentTranscript += transcript + ' ';
-                    } else {
-                        interimTranscript += transcript;
-                    }
-                }
-
-                output.textContent = currentTranscript + (interimTranscript ? '... ' + interimTranscript : '');
-                output.scrollTop = output.scrollHeight;
-
-                if (finalTranscript) {
-                    sendTranscriptUpdate(); // Send update when we have final transcript
-                }
-            };
-
-            recognition.onend = () => {
-                if (!stopButton.disabled) {
-                    recognition.start();
-                }
-            };
-
-            // Auto-start on load
-            window.addEventListener('load', () => {
-                setTimeout(() => startButton.click(), 1000);
-            });
-        </script>
-    </div>
-    """
-
-    # Return both the component value
-    return components.html(
-        speech_recognition_html,
-        height=400,
-    )
-
-def integrate_speech_component():
-    """Integrate speech component with session state management."""
-    if "voice_transcript" not in st.session_state:
-        st.session_state.voice_transcript = ""
-    if "last_update" not in st.session_state:
-        st.session_state.last_update = time.time()
-
-    # Create placeholders for display
-    transcript_container = st.empty()
-    status_container = st.empty()
-
-    # Create component
-    component_val = create_speech_component()
-
-    # Display current transcript
-    current_transcript = st.session_state.voice_transcript
-    transcript_container.text_area(
-        "Voice Transcript:",
-        value=current_transcript,
-        height=100,
-        key=f"transcript_display_{int(time.time())}"
-    )
-
-    # Show status
-    status_container.text(
-        f"Last updated: {datetime.fromtimestamp(st.session_state.last_update).strftime('%H:%M:%S')}"
-    )
-
-    return current_transcript
-
-
-
-
-
-
 # 2. 🚲BikeAI🏆 Load environment variables and initialize clients
 load_dotenv()
 
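Note on the removed block: the JavaScript above pushed the transcript out of the iframe with window.parent.postMessage, but components.html renders static HTML and always returns None to Python, so those messages never reached st.session_state. Getting a value back into Python requires a bidirectional custom component. A minimal sketch of that pattern, assuming a separately built frontend that calls Streamlit.setComponentValue from streamlit-component-lib (none of this is part of this commit; the component name and path below are hypothetical):

import streamlit as st
import streamlit.components.v1 as components

# Hypothetical bidirectional component: the frontend build in ./speech_component
# is assumed to call Streamlit.setComponentValue(transcript) whenever new text arrives.
_speech_to_text = components.declare_component(
    "speech_to_text",           # hypothetical component name
    path="./speech_component",  # hypothetical frontend build directory
)

def get_transcript() -> str:
    # Whatever the frontend passed to setComponentValue is returned here on each
    # rerun; the default is used until the user has spoken.
    return _speech_to_text(default="")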
@@ -1004,48 +863,139 @@ def get_media_html(media_path, media_type="video", width="100%"):
 def set_transcript(text):
     """Set transcript in session state."""
     st.session_state.voice_transcript = text
+
 def main():
     st.sidebar.markdown("### 🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI")
 
+    # Main navigation
     tab_main = st.radio("Choose Action:",
-
-
+                        ["🎤 Voice Input", "💬 Chat", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
+                        horizontal=True)
 
     if tab_main == "🎤 Voice Input":
         st.subheader("Voice Recognition")
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # Initialize session state for the transcript
+        if 'voice_transcript' not in st.session_state:
+            st.session_state.voice_transcript = ""
+
+        # Display speech recognition component and capture returned value
+        transcript = st.components.v1.html(speech_recognition_html, height=400)
+
+        # Update session state if there's new data
+        if transcript is not None and transcript != "":
+            st.session_state.voice_transcript = transcript
+
+        # Display the transcript in a Streamlit text area
+        st.markdown("### Processed Voice Input:")
+        st.text_area("Voice Transcript", st.session_state.voice_transcript, height=100)
+
+        # Add functionality to process the transcript
+        if st.button("Process Transcript"):
+            st.subheader("AI Response to Transcript")
+            gpt_response = process_with_gpt(st.session_state.voice_transcript)
+            st.markdown(gpt_response)
+
+        # Option to clear the transcript
+        if st.button("Clear Transcript"):
+            st.session_state.voice_transcript = ""
+            st.rerun()
 
+
+        # Buttons to process the transcript
+        if st.button("Search with GPT"):
+            st.subheader("GPT-4o Response")
+            gpt_response = process_with_gpt(st.session_state.voice_transcript)
+            st.markdown(gpt_response)
+
+        if st.button("Search with Claude"):
+            st.subheader("Claude Response")
+            claude_response = process_with_claude(st.session_state.voice_transcript)
+            st.markdown(claude_response)
+
+        if st.button("Search ArXiv"):
+            st.subheader("ArXiv Search Results")
+            arxiv_results = perform_ai_lookup(st.session_state.voice_transcript)
+            st.markdown(arxiv_results)
 
+
+        # Display last voice input
+        if st.session_state.last_voice_input:
+            st.text_area("Last Voice Input:", st.session_state.last_voice_input, height=100)
+
+
+    if tab_main == "💬 Chat":
+        # Model Selection
+        model_choice = st.sidebar.radio(
+            "Choose AI Model:",
+            ["GPT-4o", "Claude-3", "GPT+Claude+Arxiv"]
+        )
+
+        # Chat Interface
+        user_input = st.text_area("Message:", height=100)
+
+        if st.button("Send 📨"):
+            if user_input:
+                if model_choice == "GPT-4o":
+                    gpt_response = process_with_gpt(user_input)
+                elif model_choice == "Claude-3":
+                    claude_response = process_with_claude(user_input)
+                else:  # Both
+                    col1, col2, col3 = st.columns(3)
+                    with col2:
+                        st.subheader("Claude-3.5 Sonnet:")
+                        try:
+                            claude_response = process_with_claude(user_input)
+                        except:
+                            st.write('Claude 3.5 Sonnet out of tokens.')
+                    with col1:
+                        st.subheader("GPT-4o Omni:")
+                        try:
+                            gpt_response = process_with_gpt(user_input)
+                        except:
+                            st.write('GPT 4o out of tokens')
+                    with col3:
+                        st.subheader("Arxiv and Mistral Research:")
                         with st.spinner("Searching ArXiv..."):
-                    results =
+                            #results = search_arxiv(user_input)
+                            results = perform_ai_lookup(user_input)
+
                             st.markdown(results)
-
-
-
 
+        # Display Chat History
+        st.subheader("Chat History 📚")
+        tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])
 
+        with tab1:
+            for chat in st.session_state.chat_history:
+                st.text_area("You:", chat["user"], height=100)
+                st.text_area("Claude:", chat["claude"], height=200)
+                st.markdown(chat["claude"])
+
+        with tab2:
+            for message in st.session_state.messages:
+                with st.chat_message(message["role"]):
+                    st.markdown(message["content"])
+
+    elif tab_main == "📸 Media Gallery":
+        create_media_gallery()
+
+    elif tab_main == "🔍 Search ArXiv":
+        query = st.text_input("Enter your research query:")
+        if query:
+            with st.spinner("Searching ArXiv..."):
+                results = search_arxiv(query)
+                st.markdown(results)
+
+    elif tab_main == "📝 File Editor":
+        if hasattr(st.session_state, 'current_file'):
+            st.subheader(f"Editing: {st.session_state.current_file}")
+            new_content = st.text_area("Content:", st.session_state.file_content, height=300)
+            if st.button("Save Changes"):
+                with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
+                    file.write(new_content)
+                st.success("File updated successfully!")
+
 
     # Always show file manager in sidebar
     display_file_manager()
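Note on the new Voice Input branch: it passes speech_recognition_html to st.components.v1.html, but this commit also deletes the only definition of that string, and it reads st.session_state.last_voice_input without initializing it. A minimal sketch of the setup the branch appears to assume; the placeholder markup and default values below are illustrative, not taken from the commit:

import streamlit as st

# Initialize the session-state keys the Voice Input tab reads, so the first
# rerun does not fail with an AttributeError.
for key, default in (("voice_transcript", ""), ("last_voice_input", "")):
    if key not in st.session_state:
        st.session_state[key] = default

# speech_recognition_html must still be defined somewhere for
# st.components.v1.html(speech_recognition_html, height=400) to render;
# a trivial placeholder stands in here for the removed markup.
speech_recognition_html = "<div>Speech recognition UI goes here</div>"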
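For context, process_with_gpt, process_with_claude, perform_ai_lookup and search_arxiv are called throughout main() but are defined outside this diff. A rough sketch of what the two chat helpers typically look like with the clients imported at the top of app.py; the model names, prompt handling and environment variable names are assumptions, not taken from this commit:

import os

import anthropic
from openai import OpenAI

openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
claude_client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))

def process_with_gpt(prompt: str) -> str:
    # Single-turn chat completion; "gpt-4o" matches the UI label used in main().
    response = openai_client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content

def process_with_claude(prompt: str) -> str:
    # Anthropic Messages API; the model string here is an assumption.
    response = claude_client.messages.create(
        model="claude-3-5-sonnet-20241022",
        max_tokens=1024,
        messages=[{"role": "user", "content": prompt}],
    )
    return response.content[0].text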