import streamlit as st
import anthropic, openai, asyncio, base64, cv2, glob, os, pytz, re, time, zipfile
import streamlit.components.v1 as components
from datetime import datetime
from collections import defaultdict
from dotenv import load_dotenv
from gradio_client import Client
from PIL import Image
from openai import OpenAI
import edge_tts  # ensure this is installed (pip install edge-tts)
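# Assumed runtime dependencies (usual PyPI names, not pinned here): streamlit,
# anthropic, openai, opencv-python, pytz, python-dotenv, gradio_client, pillow, edge-tts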
# πŸ”§ Config & Setup
st.set_page_config(
    page_title="🚲BikeAIπŸ† Claude/GPT Research",
    page_icon="πŸš²πŸ†",
    layout="wide",
    initial_sidebar_state="auto",
    menu_items={
        'Get Help': 'https://huggingface.co/awacke1',
        'Report a bug': 'https://huggingface.co/spaces/awacke1',
        'About': "🚲BikeAIπŸ† Claude/GPT Research AI"
    }
)
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY') or st.secrets['OPENAI_API_KEY']
anthropic_key = os.getenv("ANTHROPIC_API_KEY_3") or st.secrets["ANTHROPIC_API_KEY"]
claude_client = anthropic.Anthropic(api_key=anthropic_key)
openai_client = OpenAI(api_key=openai.api_key, organization=os.getenv('OPENAI_ORG_ID'))
HF_KEY = os.getenv('HF_KEY')
API_URL = os.getenv('API_URL')

# All generated .md and .mp3 artifacts live under Media/ so the sidebar file browser can find them.
MEDIA_DIR = "Media"
os.makedirs(MEDIA_DIR, exist_ok=True)

st.session_state.setdefault('transcript_history', [])
st.session_state.setdefault('chat_history', [])
st.session_state.setdefault('openai_model', "gpt-4o-2024-05-13")
st.session_state.setdefault('messages', [])
st.session_state.setdefault('last_voice_input', "")
# 🎨 Minimal Custom CSS
st.markdown("""
<style>
    .main { background: linear-gradient(to right, #1a1a1a, #2d2d2d); color: #fff; }
    .stMarkdown { font-family: 'Helvetica Neue', sans-serif; }
    .stButton>button { margin-right: 0.5rem; }
</style>
""", unsafe_allow_html=True)
# πŸ”‘ Common Utilities
def generate_filename(prompt, file_type="md"):
    """Build a Media/ path from a US/Central timestamp plus a sanitized slice of the prompt."""
    ctz = pytz.timezone('US/Central')
    date_str = datetime.now(ctz).strftime("%m%d_%H%M")
    safe = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
    safe = re.sub(r'\s+', ' ', safe).strip()[:90]
    return os.path.join(MEDIA_DIR, f"{date_str}_{safe}.{file_type}")
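# Usage sketch (hypothetical timestamp):
#   generate_filename("What is RAG?") -> "Media/1205_1432_What is RAG.md"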
def create_file(filename, prompt, response):
    # No rerun here: callers still need to display and return results after saving.
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(prompt + "\n\n" + response)

def get_download_link(file):
    with open(file, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file)}">πŸ“‚ Download {os.path.basename(file)}</a>'
def speech_synthesis_html(result):
    # Browser-side TTS via the Web Speech API; strip quotes and newlines so the JS string stays valid.
    text = result.replace('"', '').replace('\n', ' ')
    html_code = f"""
    <html><body>
    <script>
    var msg = new SpeechSynthesisUtterance("{text}");
    window.speechSynthesis.speak(msg);
    </script>
    </body></html>
    """
    components.html(html_code, height=0)
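# Note: speech_synthesis_html (browser-side Web Speech API) is not called in the
# current flow; the EdgeTTS helpers below are the active text-to-speech path.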
# πŸ—£οΈ EdgeTTS helpers
async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=0):
    if not text.strip():
        return None
    rate_str = f"{rate:+d}%"
    pitch_str = f"{pitch:+d}Hz"
    communicate = edge_tts.Communicate(text, voice, rate=rate_str, pitch=pitch_str)
    out_fn = generate_filename(text, "mp3")
    await communicate.save(out_fn)
    return out_fn
def speak_with_edge_tts(text, voice="en-US-AriaNeural", rate=0, pitch=0):
    return asyncio.run(edge_tts_generate_audio(text, voice, rate, pitch))

def play_and_download_audio(file_path):
    if file_path and os.path.exists(file_path):
        st.audio(file_path)
        st.markdown(get_download_link(file_path), unsafe_allow_html=True)
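# Minimal usage sketch, assuming no event loop is already running (true for a
# plain Streamlit script, where asyncio.run is safe to call):
#   mp3_path = speak_with_edge_tts("Hello, research world.")
#   play_and_download_audio(mp3_path)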
def process_image(image_path, user_prompt):
    with open(image_path, "rb") as imgf:
        image_data = imgf.read()
    b64img = base64.b64encode(image_data).decode("utf-8")
    resp = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": [
                {"type": "text", "text": user_prompt},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64img}"}}
            ]}
        ],
        temperature=0.0,
    )
    return resp.choices[0].message.content
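# Vision inputs travel as base64 data URLs inside the user message; the same
# pattern works for other image MIME types (image/jpeg in the video path below).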
def process_audio(audio_path):
    with open(audio_path, "rb") as f:
        transcription = openai_client.audio.transcriptions.create(model="whisper-1", file=f)
    st.session_state.messages.append({"role": "user", "content": transcription.text})
    return transcription.text
def process_video(video_path, seconds_per_frame=1):
    vid = cv2.VideoCapture(video_path)
    total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = vid.get(cv2.CAP_PROP_FPS)
    skip = max(1, int(fps * seconds_per_frame))  # guard against fps=0 on unreadable files
    frames_b64 = []
    for i in range(0, total, skip):
        vid.set(cv2.CAP_PROP_POS_FRAMES, i)
        ret, frame = vid.read()
        if not ret:
            break
        _, buf = cv2.imencode(".jpg", frame)
        frames_b64.append(base64.b64encode(buf).decode("utf-8"))
    vid.release()
    return frames_b64
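# Sampling arithmetic: at 30 fps with seconds_per_frame=1, skip=30, so a
# 10-second clip yields roughly 10 base64-encoded frames for the model.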
def process_video_with_gpt(video_path, prompt):
    frames = process_video(video_path)
    resp = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "Analyze video frames."},
            {"role": "user", "content": [
                {"type": "text", "text": prompt},
                *[{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{fr}"}} for fr in frames]
            ]}
        ]
    )
    return resp.choices[0].message.content
def search_arxiv(query):
    st.write("πŸ” Searching ArXiv...")
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    r1 = client.predict(prompt=query, llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1", stream_outputs=True, api_name="/ask_llm")
    st.markdown("### Mixtral-8x7B-Instruct-v0.1 Result")
    st.markdown(r1)
    r2 = client.predict(prompt=query, llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2", stream_outputs=True, api_name="/ask_llm")
    st.markdown("### Mistral-7B-Instruct-v0.2 Result")
    st.markdown(r2)
    return f"{r1}\n\n{r2}"
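# gradio_client.Client drives the named Hugging Face Space remotely; each
# predict() call targets one of that Space's endpoints via api_name, so these
# calls break if the Space changes its API signature.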
def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True):
    start = time.time()
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    r = client.predict(q, 20, "Semantic Search", "mistralai/Mixtral-8x7B-Instruct-v0.1", api_name="/update_with_rag_md")
    refs = r[0]
    r2 = client.predict(q, "mistralai/Mixtral-8x7B-Instruct-v0.1", True, api_name="/ask_llm")
    result = f"### πŸ”Ž {q}\n\n{r2}\n\n{refs}"
    st.markdown(result)

    # Main Vocal Summary (Short Answer)
    if vocal_summary:
        start_main_part = time.time()
        audio_file_main = speak_with_edge_tts(r2, voice="en-US-AriaNeural", rate=0, pitch=0)
        st.write("### πŸŽ™οΈ Vocal Summary (Short Answer)")
        play_and_download_audio(audio_file_main)
        st.write(f"**Elapsed (Short Answer):** {time.time() - start_main_part:.2f} s")

    # Extended References & Summaries (optional)
    if extended_refs:
        start_refs_part = time.time()
        summaries_text = "Here are the summaries from the references: " + refs.replace('"', '')
        audio_file_refs = speak_with_edge_tts(summaries_text, voice="en-US-AriaNeural", rate=0, pitch=0)
        st.write("### πŸ“œ Extended References & Summaries")
        play_and_download_audio(audio_file_refs)
        st.write(f"**Elapsed (Extended References):** {time.time() - start_refs_part:.2f} s")

    # Paper Titles Only (short)
    if titles_summary:
        start_titles_part = time.time()
        titles = []
        for line in refs.split('\n'):
            m = re.search(r"\[([^\]]+)\]", line)
            if m:
                titles.append(m.group(1))
        if titles:
            titles_text = "Here are the titles of the papers: " + ", ".join(titles)
            audio_file_titles = speak_with_edge_tts(titles_text, voice="en-US-AriaNeural", rate=0, pitch=0)
            st.write("### πŸ”– Paper Titles")
            play_and_download_audio(audio_file_titles)
            st.write(f"**Elapsed (Titles):** {time.time() - start_titles_part:.2f} s")

    st.write(f"**Total Elapsed:** {time.time() - start:.2f} s")
    fn = generate_filename(q, "md")
    create_file(fn, q, result)
    return result
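# Usage sketch: perform_ai_lookup("mixture of experts") renders the RAG answer,
# speaks the short answer via EdgeTTS, and saves a Media/*.md transcript that
# the sidebar file browser picks up on the next rerun.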
def process_with_gpt(text):
    if not text:
        return
    st.session_state.messages.append({"role": "user", "content": text})
    with st.chat_message("user"):
        st.markdown(text)
    with st.chat_message("assistant"):
        c = openai_client.chat.completions.create(
            model=st.session_state["openai_model"],
            messages=st.session_state.messages,
            stream=False
        )
        ans = c.choices[0].message.content
        st.write("GPT-4o: " + ans)
        create_file(generate_filename(text, "md"), text, ans)
        st.session_state.messages.append({"role": "assistant", "content": ans})
    return ans
def process_with_claude(text):
    if not text:
        return
    with st.chat_message("user"):
        st.markdown(text)
    with st.chat_message("assistant"):
        r = claude_client.messages.create(
            model="claude-3-sonnet-20240229",
            max_tokens=1000,
            messages=[{"role": "user", "content": text}]
        )
        ans = r.content[0].text
        st.write("Claude: " + ans)
        create_file(generate_filename(text, "md"), text, ans)
        st.session_state.chat_history.append({"user": text, "claude": ans})
    return ans
def create_zip_of_files():
    # No rerun here: the caller needs the returned name to render a download link.
    md_files = glob.glob(os.path.join(MEDIA_DIR, "*.md"))
    mp3_files = glob.glob(os.path.join(MEDIA_DIR, "*.mp3"))
    zip_name = "all_files.zip"
    with zipfile.ZipFile(zip_name, 'w') as z:
        for f in md_files + mp3_files:
            z.write(f)
    return zip_name
def get_media_html(p, typ="video", w="100%"):
    with open(p, 'rb') as f:
        d = base64.b64encode(f.read()).decode()
    if typ == "video":
        return f'<video width="{w}" controls autoplay muted loop><source src="data:video/mp4;base64,{d}" type="video/mp4"></video>'
    return f'<audio controls style="width:{w};"><source src="data:audio/mpeg;base64,{d}" type="audio/mpeg"></audio>'
def load_md_mp3_pairs():
    # Group .md and .mp3 files in Media/ by their filename stem.
    files = glob.glob(os.path.join(MEDIA_DIR, "*.md")) + glob.glob(os.path.join(MEDIA_DIR, "*.mp3"))
    grouped = defaultdict(dict)
    for f in files:
        stem, ext = os.path.splitext(os.path.basename(f))
        ext = ext.lower()
        if ext == '.md':
            grouped[stem]['md'] = f
        elif ext == '.mp3':
            grouped[stem]['mp3'] = f
    return grouped
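# Resulting shape (hypothetical files):
#   {"1205_1432_What is RAG": {"md":  "Media/1205_1432_What is RAG.md",
#                              "mp3": "Media/1205_1432_What is RAG.mp3"}}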
def display_files_sidebar():
    st.sidebar.title("πŸ“‚ Files")
    pairs = load_md_mp3_pairs()

    # Sort by the newest modification time across each stem's files, descending.
    def mod_time(pair):
        return max(os.path.getmtime(f) for f in pair.values())

    sorted_pairs = sorted(pairs.items(), key=lambda x: mod_time(x[1]), reverse=True)
    for stem, files_dict in sorted_pairs:
        with st.sidebar.expander(f"**{stem}**"):
            # Markdown file actions
            if 'md' in files_dict:
                md_file = files_dict['md']
                c1, c2, c3, c4 = st.columns([2, 1, 1, 1])
                with c1:
                    st.write("**Markdown File**")
                with c2:
                    if st.button("πŸ‘€ View", key="view_md_" + stem):
                        with open(md_file, 'r', encoding='utf-8') as f:
                            content = f.read()
                        st.markdown("**MD File Content:**")
                        st.markdown(content)
                with c3:
                    if st.button("✏️ Edit", key="edit_md_" + stem):
                        st.session_state.editing_md = stem
                        st.experimental_rerun()
                with c4:
                    if st.button("πŸ—‘ Delete", key="del_md_" + stem):
                        os.remove(md_file)
                        st.experimental_rerun()
            else:
                st.write("No .md file for this stem.")
            # Audio file actions
            if 'mp3' in files_dict:
                mp3_file = files_dict['mp3']
                c1, c2, c3 = st.columns([2, 1, 1])
                with c1:
                    st.write("**Audio File**")
                with c2:
                    if st.button("πŸ‘€ View", key="view_mp3_" + stem):
                        st.audio(mp3_file)
                with c3:
                    if st.button("πŸ—‘ Delete", key="del_mp3_" + stem):
                        os.remove(mp3_file)
                        st.experimental_rerun()
            else:
                st.write("No .mp3 file for this stem.")

    # Zip download for everything at once
    if len(pairs) > 0:
        if st.sidebar.button("⬇️ Download All (.md and .mp3)"):
            z = create_zip_of_files()
            st.sidebar.markdown(get_download_link(z), unsafe_allow_html=True)

    # Inline editor for the selected .md file
    if 'editing_md' in st.session_state:
        stem = st.session_state.editing_md
        files_dict = load_md_mp3_pairs().get(stem, {})
        if 'md' in files_dict:
            md_file = files_dict['md']
            with open(md_file, 'r', encoding='utf-8') as f:
                content = f.read()
            st.sidebar.subheader(f"Editing: {stem}.md")
            new_stem = st.sidebar.text_input("New stem (filename without extension):", value=stem)
            new_content = st.sidebar.text_area("Content:", content, height=200)
            if st.sidebar.button("Save Changes"):
                if new_stem != stem:  # renamed: move the file first
                    new_path = os.path.join(MEDIA_DIR, new_stem + ".md")
                    os.rename(md_file, new_path)
                    md_file = new_path
                with open(md_file, 'w', encoding='utf-8') as f:
                    f.write(new_content)
                del st.session_state.editing_md
                st.experimental_rerun()
            if st.sidebar.button("Cancel"):
                del st.session_state.editing_md
                st.experimental_rerun()
def main():
    st.sidebar.markdown("### 🚲BikeAIπŸ† Multi-Agent Research AI")
    tab_main = st.radio("Action:", ["🎀 Voice Input", "πŸ“Έ Media Gallery", "πŸ” Search ArXiv", "πŸ“ File Editor"], horizontal=True)
    model_choice = st.sidebar.radio("AI Model:", ["Arxiv", "GPT-4o", "Claude-3", "GPT+Claude+Arxiv"], index=0)

    # Custom speech-input component (expects a ./mycomponent directory with the component frontend)
    mycomponent = components.declare_component("mycomponent", path="mycomponent")
    val = mycomponent(my_input_value="Hello")
    if val:
        user_input = val.strip()
        if user_input:
            if model_choice == "GPT-4o":
                process_with_gpt(user_input)
            elif model_choice == "Claude-3":
                process_with_claude(user_input)
            elif model_choice == "Arxiv":
                st.subheader("Arxiv Only Results:")
                perform_ai_lookup(user_input, vocal_summary=True, extended_refs=False, titles_summary=True)
            else:
                col1, col2, col3 = st.columns(3)
                with col1:
                    st.subheader("GPT-4o Omni:")
                    try:
                        process_with_gpt(user_input)
                    except Exception as e:
                        st.write(f"GPT-4o error: {e}")
                with col2:
                    st.subheader("Claude-3 Sonnet:")
                    try:
                        process_with_claude(user_input)
                    except Exception as e:
                        st.write(f"Claude error: {e}")
                with col3:
                    st.subheader("Arxiv + Mistral:")
                    try:
                        perform_ai_lookup(user_input, vocal_summary=True, extended_refs=False, titles_summary=True)
                    except Exception as e:
                        st.write(f"Arxiv error: {e}")
    if tab_main == "πŸ” Search ArXiv":
        st.subheader("πŸ” Search ArXiv")
        q = st.text_input("Research query:").strip()

        # πŸŽ›οΈ Audio Generation Options
        st.markdown("### πŸŽ›οΈ Audio Generation Options")
        vocal_summary = st.checkbox("πŸŽ™οΈ Vocal Summary (Short Answer)", value=True)
        extended_refs = st.checkbox("πŸ“œ Extended References & Summaries (Long)", value=False)
        titles_summary = st.checkbox("πŸ”– Paper Titles Only", value=True)

        if q and st.button("Run ArXiv Query"):
            # perform_ai_lookup renders its own markdown, so no need to re-render the return value
            perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs, titles_summary=titles_summary)
    elif tab_main == "🎀 Voice Input":
        st.subheader("🎀 Voice Recognition")
        user_text = st.text_area("Message:", height=100).strip()
        if st.button("Send πŸ“¨") and user_text:
            if model_choice == "GPT-4o":
                process_with_gpt(user_text)
            elif model_choice == "Claude-3":
                process_with_claude(user_text)
            elif model_choice == "Arxiv":
                st.subheader("Arxiv Only Results:")
                perform_ai_lookup(user_text, vocal_summary=True, extended_refs=False, titles_summary=True)
            else:
                col1, col2, col3 = st.columns(3)
                with col1:
                    st.subheader("GPT-4o Omni:")
                    process_with_gpt(user_text)
                with col2:
                    st.subheader("Claude-3 Sonnet:")
                    process_with_claude(user_text)
                with col3:
                    st.subheader("Arxiv & Mistral:")
                    perform_ai_lookup(user_text, vocal_summary=True, extended_refs=False, titles_summary=True)
        st.subheader("πŸ“œ Chat History")
        t1, t2 = st.tabs(["Claude History", "GPT-4o History"])
        with t1:
            for c in st.session_state.chat_history:
                st.write("**You:**", c["user"])
                st.write("**Claude:**", c["claude"])
        with t2:
            for m in st.session_state.messages:
                with st.chat_message(m["role"]):
                    st.markdown(m["content"])
    elif tab_main == "πŸ“Έ Media Gallery":
        st.header("🎬 Media Gallery - Images and Videos")
        tabs = st.tabs(["πŸ–ΌοΈ Images", "πŸŽ₯ Video"])
        with tabs[0]:
            imgs = glob.glob(os.path.join(MEDIA_DIR, "*.png")) + glob.glob(os.path.join(MEDIA_DIR, "*.jpg"))
            if imgs:
                c = st.slider("Cols", 1, 5, 3)
                cols = st.columns(c)
                for i, f in enumerate(imgs):
                    with cols[i % c]:
                        st.image(Image.open(f), use_container_width=True)
                        if st.button(f"πŸ‘€ Analyze {os.path.basename(f)}", key=f"analyze_{f}"):
                            a = process_image(f, "Describe this image.")
                            st.markdown(a)
            else:
                st.write("No images found.")
        with tabs[1]:
            vids = glob.glob(os.path.join(MEDIA_DIR, "*.mp4"))
            if vids:
                for v in vids:
                    with st.expander(f"πŸŽ₯ {os.path.basename(v)}"):
                        st.markdown(get_media_html(v, "video"), unsafe_allow_html=True)
                        if st.button(f"Analyze {os.path.basename(v)}", key=f"analyze_{v}"):
                            a = process_video_with_gpt(v, "Describe video.")
                            st.markdown(a)
            else:
                st.write("No videos found.")
    elif tab_main == "πŸ“ File Editor":
        st.write("Use the sidebar to edit .md files by clicking the ✏️ button on the desired file.")

    # Render the file sidebar last so it reflects any files created above.
    display_files_sidebar()

if __name__ == "__main__":
    main()