import streamlit as st
import anthropic
import openai
import base64
from datetime import datetime
import plotly.graph_objects as go
import cv2
import glob
import json
import math
import os
import pytz
import random
import re
import requests
import streamlit.components.v1 as components
import textract
import time
import zipfile
from audio_recorder_streamlit import audio_recorder
from bs4 import BeautifulSoup
from collections import deque
from dotenv import load_dotenv
from gradio_client import Client, handle_file
from huggingface_hub import InferenceClient
from io import BytesIO
from moviepy.editor import VideoFileClip
from PIL import Image
from PyPDF2 import PdfReader
from urllib.parse import quote
from xml.etree import ElementTree as ET
from openai import OpenAI
# Configuration and Setup
Site_Name = '🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI'
title = "🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI"
helpURL = 'https://huggingface.co/awacke1'
bugURL = 'https://huggingface.co/spaces/awacke1'
icons = '🚲🏆'
st.set_page_config(
page_title=title,
page_icon=icons,
layout="wide",
initial_sidebar_state="auto",
menu_items={
'Get Help': helpURL,
'Report a bug': bugURL,
'About': title
}
)
# Load environment variables and initialize clients
load_dotenv()
# OpenAI setup
openai.api_key = os.getenv('OPENAI_API_KEY')
if openai.api_key is None:
    openai.api_key = st.secrets['OPENAI_API_KEY']
openai_client = OpenAI(
    api_key=openai.api_key,  # use the resolved key so the st.secrets fallback reaches the client
    organization=os.getenv('OPENAI_ORG_ID')
)
# Claude setup
anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
if anthropic_key is None:
    anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
claude_client = anthropic.Anthropic(api_key=anthropic_key)
# Initialize session states
if 'transcript_history' not in st.session_state:
st.session_state.transcript_history = []
if "chat_history" not in st.session_state:
st.session_state.chat_history = []
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-4o-2024-05-13"
if "messages" not in st.session_state:
st.session_state.messages = []
if 'last_voice_input' not in st.session_state:
st.session_state.last_voice_input = ""
# Speech Recognition HTML Component
# NOTE: the original continuous-speech HTML/JS markup is omitted here; the
# string below is only a minimal placeholder so the component still renders.
speech_recognition_html = """
<h3>Continuous Speech Demo</h3>
<p id="status">Ready</p>
"""
# Helper Functions
def generate_filename(prompt, file_type):
central = pytz.timezone('US/Central')
safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
return f"{safe_date_time}_{safe_prompt}.{file_type}"
# [Previous helper functions remain the same]
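
# --- Hedged stand-ins for the elided ArXiv helpers ---
# search_arxiv() and perform_ai_lookup() are also referenced in main().  The
# sketches below assume the public arXiv Atom API (export.arxiv.org).  The
# original perform_ai_lookup() appears to pair ArXiv results with a Mistral
# model (the gradio_client import suggests a Hugging Face Space call); that
# step is replaced here with a Claude summary purely as an illustrative assumption.
def search_arxiv(query, max_results=5):
    """Query the arXiv Atom API and return the results as Markdown (sketch)."""
    url = (
        "http://export.arxiv.org/api/query?search_query=all:"
        f"{quote(query)}&start=0&max_results={max_results}"
    )
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    ns = {"atom": "http://www.w3.org/2005/Atom"}
    root = ET.fromstring(response.text)
    sections = []
    for entry in root.findall("atom:entry", ns):
        paper_title = entry.findtext("atom:title", default="", namespaces=ns).strip()
        link = entry.findtext("atom:id", default="", namespaces=ns).strip()
        summary = entry.findtext("atom:summary", default="", namespaces=ns).strip()
        sections.append(f"### [{paper_title}]({link})\n{summary}\n")
    return "\n".join(sections) if sections else "No results found."

def perform_ai_lookup(query):
    """Search ArXiv, then summarize the hits with Claude (sketch)."""
    papers = search_arxiv(query)
    response = claude_client.messages.create(
        model="claude-3-5-sonnet-20240620",  # assumed model name
        max_tokens=1000,
        messages=[{
            "role": "user",
            "content": f"Summarize these ArXiv results for '{query}':\n\n{papers}",
        }],
    )
    return f"{response.content[0].text}\n\n---\n\n{papers}"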
# ... [Include all the helper functions from the second file]
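
# --- Hedged stand-ins for the elided UI helpers ---
# create_media_gallery() and display_file_manager() are called in main() but
# omitted above.  These sketches only cover the basics implied by the imports
# (glob for local files, PIL for images); the originals likely do more
# (video/audio playback, per-file AI actions, downloads).
def create_media_gallery():
    """Show local image files in a simple three-column gallery (sketch)."""
    st.subheader("📸 Media Gallery")
    image_files = sorted(glob.glob("*.png") + glob.glob("*.jpg") + glob.glob("*.jpeg"))
    if not image_files:
        st.info("No images found in the working directory.")
        return
    cols = st.columns(3)
    for idx, image_file in enumerate(image_files):
        with cols[idx % 3]:
            st.image(Image.open(image_file), caption=image_file)

def display_file_manager():
    """List local Markdown files in the sidebar with open and delete buttons (sketch)."""
    st.sidebar.markdown("### 📂 File Manager")
    for md_file in sorted(glob.glob("*.md")):
        if st.sidebar.button(f"📄 Open {md_file}", key=f"open_{md_file}"):
            st.session_state.current_file = md_file
            with open(md_file, 'r', encoding='utf-8') as file:
                st.session_state.file_content = file.read()
        if st.sidebar.button(f"🗑 Delete {md_file}", key=f"delete_{md_file}"):
            os.remove(md_file)
            st.experimental_rerun()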
def main():
st.sidebar.markdown("### 🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI")
# Main navigation
tab_main = st.radio("Choose Action:",
["🎤 Voice Input", "💬 Chat", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
horizontal=True)
if tab_main == "🎤 Voice Input":
st.subheader("Voice Recognition")
        # Display speech recognition component (components.html() is write-only;
        # the handler below only fires if a bidirectional custom component is used)
        speech_component = components.html(speech_recognition_html, height=400)
# Handle speech recognition output
if speech_component:
try:
data = speech_component
if isinstance(data, dict):
if data.get('type') == 'final_transcript':
text = data.get('text', '').strip()
if text:
st.session_state.last_voice_input = text
# Process voice input with AI
st.subheader("AI Response to Voice Input:")
col1, col2, col3 = st.columns(3)
with col2:
st.write("Claude-3.5 Sonnet:")
try:
claude_response = process_with_claude(text)
                                except Exception:
st.write('Claude 3.5 Sonnet out of tokens.')
with col1:
st.write("GPT-4o Omni:")
try:
gpt_response = process_with_gpt(text)
                                except Exception:
st.write('GPT 4o out of tokens')
with col3:
st.write("Arxiv and Mistral Research:")
with st.spinner("Searching ArXiv..."):
results = perform_ai_lookup(text)
st.markdown(results)
elif data.get('type') == 'clear_transcript':
st.session_state.last_voice_input = ""
st.experimental_rerun()
except Exception as e:
st.error(f"Error processing voice input: {e}")
# Display last voice input
if st.session_state.last_voice_input:
st.text_area("Last Voice Input:", st.session_state.last_voice_input, height=100)
# [Rest of the main function remains the same]
elif tab_main == "💬 Chat":
# [Previous chat interface code]
pass
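        # Hedged sketch of the elided chat interface ("[Previous chat interface
        # code]"): a minimal assumption built on st.chat_input/st.chat_message
        # and the process_with_gpt() helper sketched above; everything is routed
        # to GPT here for brevity, whereas the original likely offers both models.
        st.subheader("💬 Chat")
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])
        prompt = st.chat_input("Ask anything...")
        if prompt:
            with st.chat_message("user"):
                st.markdown(prompt)
            with st.chat_message("assistant"):
                process_with_gpt(prompt)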
elif tab_main == "📸 Media Gallery":
create_media_gallery()
elif tab_main == "🔍 Search ArXiv":
query = st.text_input("Enter your research query:")
if query:
with st.spinner("Searching ArXiv..."):
results = search_arxiv(query)
st.markdown(results)
elif tab_main == "📝 File Editor":
        if 'current_file' in st.session_state and 'file_content' in st.session_state:
st.subheader(f"Editing: {st.session_state.current_file}")
new_content = st.text_area("Content:", st.session_state.file_content, height=300)
if st.button("Save Changes"):
with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
file.write(new_content)
st.success("File updated successfully!")
# Always show file manager in sidebar
display_file_manager()
if __name__ == "__main__":
main()