import streamlit as st
import anthropic, openai, base64, cv2, glob, json, math, os, pytz, random, re, requests, time, zipfile
import plotly.graph_objects as go
import streamlit.components.v1 as components
from datetime import datetime
from audio_recorder_streamlit import audio_recorder
from bs4 import BeautifulSoup
from collections import defaultdict, deque
from dotenv import load_dotenv
from gradio_client import Client
from huggingface_hub import InferenceClient
from io import BytesIO
from PIL import Image
from PyPDF2 import PdfReader
from urllib.parse import quote
from xml.etree import ElementTree as ET
from openai import OpenAI
import extra_streamlit_components as stx
from streamlit.runtime.scriptrunner import get_script_run_ctx
import asyncio
import edge_tts
# 🎯 1. Core Configuration & Setup
st.set_page_config(
    page_title="🚲BikeAI🏆 Claude/GPT Research",
    page_icon="🚲🏆",
    layout="wide",
    initial_sidebar_state="auto",
    menu_items={
        'Get Help': 'https://huggingface.co/awacke1',
        'Report a bug': 'https://huggingface.co/spaces/awacke1',
        'About': "🚲BikeAI🏆 Claude/GPT Research AI"
    }
)
load_dotenv()
# 🔑 2. API Setup & Clients
openai_api_key = os.getenv('OPENAI_API_KEY', "")
anthropic_key = os.getenv('ANTHROPIC_API_KEY_3', "")
xai_key = os.getenv('xai', "")
if 'OPENAI_API_KEY' in st.secrets:
    openai_api_key = st.secrets['OPENAI_API_KEY']
if 'ANTHROPIC_API_KEY' in st.secrets:
    anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
openai.api_key = openai_api_key
claude_client = anthropic.Anthropic(api_key=anthropic_key)
openai_client = OpenAI(api_key=openai.api_key, organization=os.getenv('OPENAI_ORG_ID'))
HF_KEY = os.getenv('HF_KEY')
API_URL = os.getenv('API_URL')
# 📝 3. Session State Management
_session_defaults = {
    'transcript_history': [],
    'chat_history': [],
    'openai_model': "gpt-4o-2024-05-13",
    'messages': [],
    'last_voice_input': "",
    'editing_file': None,
    'edit_new_name': "",
    'edit_new_content': "",
    'viewing_prefix': None,
    'should_rerun': False,
    'old_val': None,
}
for _key, _default in _session_defaults.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
# 🎨 4. Custom CSS
st.markdown("""
<style>
    .main { background: linear-gradient(to right, #1a1a1a, #2d2d2d); color: #fff; }
    .stMarkdown { font-family: 'Helvetica Neue', sans-serif; }
    .stButton>button {
        margin-right: 0.5rem;
    }
    .audio-player {
        margin: 1rem 0;
        padding: 1rem;
        border-radius: 10px;
        background: #f5f5f5;
        box-shadow: 0 2px 4px rgba(0,0,0,0.1);
    }
</style>
""", unsafe_allow_html=True)
FILE_EMOJIS = {
    "md": "📝",
    "mp3": "🎵",
}
def clean_for_speech(text: str) -> str:
    """Clean text for speech synthesis"""
    text = text.replace("\n", " ")
    text = text.replace("</s>", " ")
    text = text.replace("#", "")
    text = re.sub(r"\(https?:\/\/[^\)]+\)", "", text)
    text = re.sub(r"\s+", " ", text).strip()
    return text
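# Example: clean_for_speech("## Title\nSee (https://example.com) here")
# returns "Title See here".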
def speech_synthesis_html(result):
    """Create HTML for in-browser speech synthesis."""
    # Strip quotes, backslashes, and newlines so the injected JS string stays valid.
    safe = result.replace("\\", "").replace('"', '').replace("\n", " ")
    html_code = f"""
    <html><body>
    <script>
    var msg = new SpeechSynthesisUtterance("{safe}");
    window.speechSynthesis.speak(msg);
    </script>
    </body></html>
    """
    components.html(html_code, height=0)
async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=0):
    """Generate audio using Edge TTS"""
    text = clean_for_speech(text)
    if not text.strip():
        return None
    rate_str = f"{rate:+d}%"
    pitch_str = f"{pitch:+d}Hz"
    communicate = edge_tts.Communicate(text, voice, rate=rate_str, pitch=pitch_str)
    out_fn = generate_filename(text, text, "mp3")
    await communicate.save(out_fn)
    return out_fn
def speak_with_edge_tts(text, voice="en-US-AriaNeural", rate=0, pitch=0):
    """Wrapper for edge TTS generation"""
    return asyncio.run(edge_tts_generate_audio(text, voice, rate, pitch))
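# Caveat: asyncio.run() raises RuntimeError if an event loop is already
# running in this thread; in that case, schedule edge_tts_generate_audio()
# on the existing loop instead of calling this wrapper.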
def play_and_download_audio(file_path):
    """Play audio and provide a download link for it."""
    if file_path and os.path.exists(file_path):
        st.audio(file_path)
        with open(file_path, "rb") as audio_f:  # close the handle instead of leaking it
            b64 = base64.b64encode(audio_f.read()).decode()
        fname = os.path.basename(file_path)
        dl_link = f'<a href="data:audio/mpeg;base64,{b64}" download="{fname}">Download {fname}</a>'
        st.markdown(dl_link, unsafe_allow_html=True)
def save_full_transcript(query, text):
    """Save full transcript of Arxiv results as a file."""
    create_file(query, text, "md")
def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True, full_audio=False):
    """Perform Arxiv search and generate audio summaries"""
    start = time.time()
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    refs = client.predict(q, 20, "Semantic Search", "mistralai/Mixtral-8x7B-Instruct-v0.1", api_name="/update_with_rag_md")[0]
    r2 = client.predict(q, "mistralai/Mixtral-8x7B-Instruct-v0.1", True, api_name="/ask_llm")
    result = f"### 🔎 {q}\n\n{r2}\n\n{refs}"
    st.markdown(result)
    # Generate full audio version if requested
    if full_audio:
        complete_text = f"Complete response for query: {q}. {clean_for_speech(r2)} {clean_for_speech(refs)}"
        audio_file_full = speak_with_edge_tts(complete_text)
        st.write("### 📚 Full Audio")
        play_and_download_audio(audio_file_full)
    if vocal_summary:
        main_text = clean_for_speech(r2)
        audio_file_main = speak_with_edge_tts(main_text)
        st.write("### 🎙 Short Audio")
        play_and_download_audio(audio_file_main)
    if extended_refs:
        summaries_text = "Extended references: " + refs.replace('"', '')
        summaries_text = clean_for_speech(summaries_text)
        audio_file_refs = speak_with_edge_tts(summaries_text)
        st.write("### 📜 Long Refs")
        play_and_download_audio(audio_file_refs)
    if titles_summary:
        titles = []
        for line in refs.split('\n'):
            m = re.search(r"\[([^\]]+)\]", line)
            if m:
                titles.append(m.group(1))
        if titles:
            titles_text = "Titles: " + ", ".join(titles)
            titles_text = clean_for_speech(titles_text)
            audio_file_titles = speak_with_edge_tts(titles_text)
            st.write("### 🔖 Titles")
            play_and_download_audio(audio_file_titles)
    elapsed = time.time() - start
    st.write(f"**Total Elapsed:** {elapsed:.2f} s")
    # Save the combined result as a markdown file
    create_file(q, result, "md")
    return result
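# Typical call (illustrative): render the answer plus a short audio summary only:
#   perform_ai_lookup("mixture of experts routing", vocal_summary=True,
#                     extended_refs=False, titles_summary=False)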
def process_with_gpt(text):
    """Process text with GPT-4o"""
    if not text:
        return
    st.session_state.messages.append({"role": "user", "content": text})
    with st.chat_message("user"):
        st.markdown(text)
    with st.chat_message("assistant"):
        c = openai_client.chat.completions.create(
            model=st.session_state["openai_model"],
            messages=st.session_state.messages,
            stream=False
        )
        ans = c.choices[0].message.content
        st.write("GPT-4o: " + ans)
        create_file(text, ans, "md")
        st.session_state.messages.append({"role": "assistant", "content": ans})
    return ans
def process_with_claude(text):
    """Process text with Claude"""
    if not text:
        return
    with st.chat_message("user"):
        st.markdown(text)
    with st.chat_message("assistant"):
        r = claude_client.messages.create(
            model="claude-3-sonnet-20240229",
            max_tokens=1000,
            messages=[{"role": "user", "content": text}]
        )
        ans = r.content[0].text
        st.write("Claude-3 Sonnet: " + ans)  # label matches the model actually called
        create_file(text, ans, "md")
        st.session_state.chat_history.append({"user": text, "claude": ans})
    return ans
def generate_filename(prompt, response, file_type="md"):
    """Generate a filename from a timestamp and the cleaned prompt (response is unused)."""
    timestamp = datetime.now().strftime("%y%m_%H%M")
    safe_text = re.sub(r'[^\w\s-]', '', prompt[:50])
    return f"{timestamp}_{safe_text}.{file_type}"
def create_file(prompt, response, file_type="md"):
    """Create file with content."""
    filename = generate_filename(prompt.strip(), response.strip(), file_type)
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(prompt + "\n\n" + response)
    return filename

def get_download_link(file):
    """Generate download link for file"""
    with open(file, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    return f'<a href="data:file/zip;base64,{b64}" download="{os.path.basename(file)}">📂 Download {os.path.basename(file)}</a>'
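# ---------------------------------------------------------------------------
# main() below references four helpers that are not defined in this file:
# process_image, process_video_with_gpt, load_files_for_sidebar, and
# display_file_manager_sidebar. The sketches below are assumptions, not the
# Space's original implementations: they assume the vision-capable GPT-4o
# chat API for media analysis and the "%y%m_%H%M" timestamp prefix (9 chars,
# from generate_filename above) for grouping files in the sidebar.
# ---------------------------------------------------------------------------

def process_image(image_path, prompt):
    """Sketch: describe an image with the vision-capable OpenAI chat model."""
    mime = "image/png" if image_path.lower().endswith(".png") else "image/jpeg"
    with open(image_path, "rb") as img_f:
        b64_img = base64.b64encode(img_f.read()).decode()
    resp = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[{"role": "user", "content": [
            {"type": "text", "text": prompt},
            {"type": "image_url", "image_url": {"url": f"data:{mime};base64,{b64_img}"}}
        ]}]
    )
    return resp.choices[0].message.content

def process_video_with_gpt(video_path, prompt):
    """Sketch: sample a few frames (about one per second) and describe them."""
    vid = cv2.VideoCapture(video_path)
    fps = vid.get(cv2.CAP_PROP_FPS) or 30
    frames, count = [], 0
    while len(frames) < 4:
        ok, frame = vid.read()
        if not ok:
            break
        if count % int(fps) == 0:
            _, buf = cv2.imencode(".jpg", frame)
            frames.append(base64.b64encode(buf.tobytes()).decode())
        count += 1
    vid.release()
    content = [{"type": "text", "text": prompt}] + [
        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{fr}"}}
        for fr in frames
    ]
    resp = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[{"role": "user", "content": content}]
    )
    return resp.choices[0].message.content

def load_files_for_sidebar():
    """Sketch: group saved .md/.mp3 files by their timestamp prefix."""
    groups = defaultdict(list)
    for f in glob.glob("*.md") + glob.glob("*.mp3"):
        groups[os.path.basename(f)[:9]].append(f)  # "%y%m_%H%M" prefix
    return groups, sorted(groups.keys(), reverse=True)

def display_file_manager_sidebar(groups, sorted_prefixes):
    """Sketch: list grouped files in the sidebar with a view button per group."""
    st.sidebar.markdown("### 📂 Files")
    for prefix in sorted_prefixes:
        with st.sidebar.expander(prefix):
            for f in groups[prefix]:
                ext = os.path.splitext(f)[1].strip('.')
                st.write(f"{FILE_EMOJIS.get(ext, '📄')} {os.path.basename(f)}")
            if st.button(f"👀 View {prefix}", key=f"view_{prefix}"):
                st.session_state.viewing_prefix = prefix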
def main():
    st.sidebar.markdown("### 🚲BikeAI🏆 Multi-Agent Research")
    tab_main = st.radio("Action:", ["🎤 Voice", "📸 Media", "🔍 ArXiv", "📝 Editor"], horizontal=True)
    mycomponent = components.declare_component("mycomponent", path="mycomponent")
    val = mycomponent(my_input_value="Hello")
    if tab_main == "🔍 ArXiv":
        # Show input in text box for editing if detected
        q = ""
        if val:
            val_stripped = val.replace('\n', ' ')
            q = st.text_area("✏️ Edit Input:", value=val_stripped, height=100)
        run_option = st.selectbox("Model:", ["Arxiv", "GPT-4o", "Claude-3.5"])
        col1, col2 = st.columns(2)
        with col1:
            autorun = st.checkbox("⚙ AutoRun", value=True)
        with col2:
            full_audio = st.checkbox("📚FullAudio", value=False,
                                     help="Full audio of results")
            full_transcript = st.checkbox("🧾FullTranscript", value=False,
                                          help="Generate a full transcript file")
        if q and st.button("🔍Run"):
            result = perform_ai_lookup(q, full_audio=full_audio)
            if full_transcript:
                save_full_transcript(q, result)
        st.markdown("### Change Prompt & Re-Run")
        q_new = st.text_input("🔄 Modify Query:")
        if q_new and st.button("🔄 Re-Run with Modified Query"):
            result = perform_ai_lookup(q_new, full_audio=full_audio)
            if full_transcript:
                save_full_transcript(q_new, result)
elif tab_main == "🎤 Voice": | |
st.subheader("🎤 Voice Input") | |
user_text = st.text_area("💬 Message:", height=100) | |
user_text = user_text.strip().replace('\n', ' ') | |
if st.button("📨 Send"): | |
process_with_gpt(user_text) | |
st.subheader("📜 Chat History") | |
t1,t2=st.tabs(["Claude History","GPT-4o History"]) | |
with t1: | |
for c in st.session_state.chat_history: | |
st.write("**You:**", c["user"]) | |
st.write("**Claude:**", c["claude"]) | |
with t2: | |
for m in st.session_state.messages: | |
with st.chat_message(m["role"]): | |
st.markdown(m["content"]) | |
elif tab_main == "📸 Media": | |
st.header("📸 Images & 🎥 Videos") | |
tabs = st.tabs(["🖼 Images", "🎥 Video"]) | |
with tabs[0]: | |
imgs = glob.glob("*.png")+glob.glob("*.jpg") | |
if imgs: | |
c = st.slider("Cols",1,5,3) | |
cols = st.columns(c) | |
for i,f in enumerate(imgs): | |
with cols[i%c]: | |
st.image(Image.open(f),use_container_width=True) | |
if st.button(f"👀 Analyze {os.path.basename(f)}", key=f"analyze_{f}"): | |
a = process_image(f,"Describe this image.") | |
st.markdown(a) | |
else: | |
st.write("No images found.") | |
with tabs[1]: | |
vids = glob.glob("*.mp4") | |
if vids: | |
for v in vids: | |
with st.expander(f"🎥 {os.path.basename(v)}"): | |
st.video(v) | |
if st.button(f"Analyze {os.path.basename(v)}", key=f"analyze_{v}"): | |
a = process_video_with_gpt(v,"Describe video.") | |
st.markdown(a) | |
else: | |
st.write("No videos found.") | |
elif tab_main == "📝 Editor": | |
if getattr(st.session_state,'current_file',None): | |
st.subheader(f"Editing: {st.session_state.current_file}") | |
new_text = st.text_area("✏️ Content:", st.session_state.file_content, height=300) | |
if st.button("💾 Save"): | |
with open(st.session_state.current_file,'w',encoding='utf-8') as f: | |
f.write(new_text) | |
st.success("Updated!") | |
st.session_state.should_rerun = True | |
else: | |
st.write("Select a file from the sidebar to edit.") | |
    groups, sorted_prefixes = load_files_for_sidebar()
    display_file_manager_sidebar(groups, sorted_prefixes)
    if st.session_state.viewing_prefix and st.session_state.viewing_prefix in groups:
        st.write("---")
        st.write(f"**Viewing Group:** {st.session_state.viewing_prefix}")
        for f in groups[st.session_state.viewing_prefix]:
            fname = os.path.basename(f)
            ext = os.path.splitext(fname)[1].lower().strip('.')
            st.write(f"### {fname}")
            if ext == "md":
                with open(f, 'r', encoding='utf-8') as md_f:
                    st.markdown(md_f.read())
            elif ext == "mp3":
                st.audio(f)
            else:
                st.markdown(get_download_link(f), unsafe_allow_html=True)
        if st.button("❌ Close"):
            st.session_state.viewing_prefix = None
    if st.session_state.should_rerun:
        st.session_state.should_rerun = False
        st.rerun()
if __name__ == "__main__":
    main()