Update app.py

app.py CHANGED
@@ -31,6 +31,11 @@ from urllib.parse import quote
 from xml.etree import ElementTree as ET
 from openai import OpenAI
 
+
+
+
+
+
 # 1. 🚲BikeAI🏆 Configuration and Setup
 Site_Name = '🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI'
 title = "🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI"
@@ -706,6 +711,12 @@ speech_recognition_html = """
             }
             lastUpdateTime = Date.now();
         }
+
+        #window.parent.postMessage({
+        #    type: 'final_transcript',
+        #    text: finalTranscript
+        #}, '*');
+
 
         output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
         output.scrollTop = output.scrollHeight;
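The commented-out block added in this hunk sketches a window.parent.postMessage bridge for the transcript but leaves it disabled. Note that `#` is not a comment marker in JavaScript (these lines sit inside the script embedded in the `speech_recognition_html` Python string), so `//` would be needed to keep them inert, and removing the markers entirely would be needed to activate them. A minimal sketch of the enabled bridge follows, kept in a Python string the same way app.py embeds its other HTML/JS; the variable name `fullTranscript` is an assumption taken from the nearby context lines (the commented code refers to `finalTranscript`), and the snippet name is hypothetical.

# Sketch only: the postMessage bridge with valid JavaScript syntax. The
# surrounding recognizer script is assumed to define fullTranscript (or
# finalTranscript); adjust the name to whatever the rest of app.py uses.
transcript_bridge_js = """
    // Forward the accumulated transcript to the parent Streamlit page.
    window.parent.postMessage({
        type: 'final_transcript',
        text: fullTranscript
    }, '*');
"""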
@@ -878,6 +889,55 @@ def main():
 
     # Display speech recognition component
     speech_component = st.components.v1.html(speech_recognition_html, height=400)
+
+
+    #Experiment: Use `st.session_state` to store the transcript
+
+    @st.experimental_memo
+    def set_transcript(text):
+        """Set transcript in session state."""
+        st.session_state.voice_transcript = text
+
+    # Listen to messages from the HTML component
+    components.html("""
+        <script>
+        window.addEventListener('message', (event) => {
+            if (event.data.type === 'final_transcript') {
+                const transcript = event.data.text;
+                Streamlit.setComponentValue(transcript);
+            }
+        });
+        </script>
+    """, height=0)
+
+
+    if 'voice_transcript' not in st.session_state:
+        st.session_state.voice_transcript = ""
+
+    # Check for updates to transcript
+    if st.session_state.voice_transcript:
+        st.markdown("### Processed Voice Input:")
+        st.text_area("Voice Transcript", st.session_state.voice_transcript, height=100)
+
+        # Buttons to process the transcript
+        if st.button("Search with GPT"):
+            st.subheader("GPT-4o Response")
+            gpt_response = process_with_gpt(st.session_state.voice_transcript)
+            st.markdown(gpt_response)
+
+        if st.button("Search with Claude"):
+            st.subheader("Claude Response")
+            claude_response = process_with_claude(st.session_state.voice_transcript)
+            st.markdown(claude_response)
+
+        if st.button("Search ArXiv"):
+            st.subheader("ArXiv Search Results")
+            arxiv_results = perform_ai_lookup(st.session_state.voice_transcript)
+            st.markdown(arxiv_results)
+        # Clear transcript button
+        if st.button("Clear Transcript"):
+            st.session_state.voice_transcript = ""
+
 
     # Handle speech recognition output
     if speech_component:
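Two Streamlit details in this hunk are worth flagging. st.components.v1.html renders static HTML and returns None, so `speech_component` never carries a value back into Python, and `Streamlit.setComponentValue` belongs to the JavaScript API of a bidirectional custom component (declared with components.declare_component and streamlit-component-lib); it is not defined inside a plain components.html block. Separately, `@st.experimental_memo` is a caching decorator (since superseded by st.cache_data) rather than a session-state helper, and the decorated `set_transcript` is never called in the added code. A minimal sketch of the intended flow under those constraints follows; it assumes the `process_with_gpt`, `process_with_claude`, and `perform_ai_lookup` helpers already defined in app.py, and it uses an editable text area as the transcript source instead of the postMessage bridge. The function and callback names are hypothetical.

import streamlit as st

# Sketch only: keep the transcript in session state and fan it out to the
# app's existing helpers (process_with_gpt, process_with_claude,
# perform_ai_lookup are assumed to exist in app.py).

def _clear_transcript():
    # Runs as a button callback, before widgets are re-created on the next
    # run, so the widget-backed session-state key can be overwritten safely.
    st.session_state.voice_transcript = ""

def render_voice_transcript_section():
    if 'voice_transcript' not in st.session_state:
        st.session_state.voice_transcript = ""

    # Editable stand-in for the speech component's output; the key binds the
    # widget directly to st.session_state.voice_transcript.
    st.text_area("Voice Transcript", key="voice_transcript", height=100)

    transcript = st.session_state.voice_transcript
    if transcript:
        if st.button("Search with GPT"):
            st.subheader("GPT-4o Response")
            st.markdown(process_with_gpt(transcript))

        if st.button("Search with Claude"):
            st.subheader("Claude Response")
            st.markdown(process_with_claude(transcript))

        if st.button("Search ArXiv"):
            st.subheader("ArXiv Search Results")
            st.markdown(perform_ai_lookup(transcript))

    st.button("Clear Transcript", on_click=_clear_transcript)

Calling render_voice_transcript_section() from main() would slot in roughly where the added block sits in this diff.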