import streamlit as st import anthropic, openai, base64, cv2, glob, json, math, os, pytz, random, re, requests, textract, time, zipfile import plotly.graph_objects as go import streamlit.components.v1 as components from datetime import datetime from audio_recorder_streamlit import audio_recorder from bs4 import BeautifulSoup from collections import defaultdict, deque, Counter from dotenv import load_dotenv from gradio_client import Client from huggingface_hub import InferenceClient from io import BytesIO from PIL import Image from PyPDF2 import PdfReader from urllib.parse import quote from xml.etree import ElementTree as ET from openai import OpenAI import extra_streamlit_components as stx from streamlit.runtime.scriptrunner import get_script_run_ctx import asyncio import edge_tts from streamlit_marquee import streamlit_marquee # ───────────────────────────────────────────────────────── # 1. CORE CONFIGURATION & SETUP # ───────────────────────────────────────────────────────── st.set_page_config( page_title="🚲TalkingAIResearcher🏆", page_icon="🚲🏆", layout="wide", initial_sidebar_state="auto", menu_items={ 'Get Help': 'https://huggingface.co/awacke1', 'Report a bug': 'https://huggingface.co/spaces/awacke1', 'About': "🚲TalkingAIResearcher🏆" } ) load_dotenv() # Available English voices for Edge TTS EDGE_TTS_VOICES = [ "en-US-AriaNeural", "en-US-GuyNeural", "en-US-JennyNeural", "en-GB-SoniaNeural", "en-GB-RyanNeural", "en-AU-NatashaNeural", "en-AU-WilliamNeural", "en-CA-ClaraNeural", "en-CA-LiamNeural" ] # Session state variables if 'marquee_settings' not in st.session_state: st.session_state['marquee_settings'] = { "background": "#1E1E1E", "color": "#FFFFFF", "font-size": "14px", "animationDuration": "20s", "width": "100%", "lineHeight": "35px" } if 'tts_voice' not in st.session_state: st.session_state['tts_voice'] = EDGE_TTS_VOICES[0] if 'audio_format' not in st.session_state: st.session_state['audio_format'] = 'mp3' if 'transcript_history' not in st.session_state: st.session_state['transcript_history'] = [] if 'chat_history' not in st.session_state: st.session_state['chat_history'] = [] if 'openai_model' not in st.session_state: st.session_state['openai_model'] = "gpt-4o-2024-05-13" if 'messages' not in st.session_state: st.session_state['messages'] = [] if 'last_voice_input' not in st.session_state: st.session_state['last_voice_input'] = "" if 'editing_file' not in st.session_state: st.session_state['editing_file'] = None if 'edit_new_name' not in st.session_state: st.session_state['edit_new_name'] = "" if 'edit_new_content' not in st.session_state: st.session_state['edit_new_content'] = "" if 'viewing_prefix' not in st.session_state: st.session_state['viewing_prefix'] = None if 'should_rerun' not in st.session_state: st.session_state['should_rerun'] = False if 'old_val' not in st.session_state: st.session_state['old_val'] = None if 'last_query' not in st.session_state: st.session_state['last_query'] = "" if 'marquee_content' not in st.session_state: st.session_state['marquee_content'] = "🚀 Welcome to TalkingAIResearcher | 🤖 Your Research Assistant" # New: default AutoRun to False (off) if 'autorun' not in st.session_state: st.session_state['autorun'] = False # API Keys openai_api_key = os.getenv('OPENAI_API_KEY', "") anthropic_key = os.getenv('ANTHROPIC_API_KEY_3', "") xai_key = os.getenv('xai',"") if 'OPENAI_API_KEY' in st.secrets: openai_api_key = st.secrets['OPENAI_API_KEY'] if 'ANTHROPIC_API_KEY' in st.secrets: anthropic_key = st.secrets["ANTHROPIC_API_KEY"] openai.api_key = openai_api_key openai_client = OpenAI(api_key=openai.api_key, organization=os.getenv('OPENAI_ORG_ID')) HF_KEY = os.getenv('HF_KEY') API_URL = os.getenv('API_URL') # Helper constants FILE_EMOJIS = { "md": "📝", "mp3": "🎵", "wav": "🔊", "pdf": "📕", "mp4": "🎥", "csv": "📈", "xlsx": "📊", "html": "🌐", "py": "🐍", "txt": "📄" } # ───────────────────────────────────────────────────────── # 2. HELPER FUNCTIONS # ───────────────────────────────────────────────────────── def get_central_time(): """Get current time in US Central timezone.""" central = pytz.timezone('US/Central') return datetime.now(central) def format_timestamp_prefix(): """Generate timestamp prefix in format MM_dd_yy_hh_mm_AM/PM.""" ct = get_central_time() return ct.strftime("%m_%d_%y_%I_%M_%p") def initialize_marquee_settings(): if 'marquee_settings' not in st.session_state: st.session_state['marquee_settings'] = { "background": "#1E1E1E", "color": "#FFFFFF", "font-size": "14px", "animationDuration": "20s", "width": "100%", "lineHeight": "35px" } def get_marquee_settings(): initialize_marquee_settings() return st.session_state['marquee_settings'] def update_marquee_settings_ui(): """Add color pickers & sliders for marquee config in sidebar.""" st.sidebar.markdown("### 🎯 Marquee Settings") cols = st.sidebar.columns(2) with cols[0]: bg_color = st.color_picker("🎨 Background", st.session_state['marquee_settings']["background"], key="bg_color_picker") text_color = st.color_picker("✍️ Text", st.session_state['marquee_settings']["color"], key="text_color_picker") with cols[1]: font_size = st.slider("📏 Size", 10, 24, 14, key="font_size_slider") duration = st.slider("⏱️ Speed", 1, 20, 20, key="duration_slider") st.session_state['marquee_settings'].update({ "background": bg_color, "color": text_color, "font-size": f"{font_size}px", "animationDuration": f"{duration}s" }) def display_marquee(text, settings, key_suffix=""): """Show marquee text with style from settings.""" truncated_text = text[:280] + "..." if len(text) > 280 else text streamlit_marquee( content=truncated_text, **settings, key=f"marquee_{key_suffix}" ) st.write("") def get_high_info_terms(text: str, top_n=10) -> list: """Extract top_n freq words or bigrams (excluding stopwords).""" stop_words = set(['the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with']) words = re.findall(r'\b\w+(?:-\w+)*\b', text.lower()) bi_grams = [' '.join(pair) for pair in zip(words, words[1:])] combined = words + bi_grams filtered = [term for term in combined if term not in stop_words and len(term.split()) <= 2] counter = Counter(filtered) return [term for term, freq in counter.most_common(top_n)] def clean_text_for_filename(text: str) -> str: """Remove special chars, short words, etc. for filenames.""" text = text.lower() text = re.sub(r'[^\w\s-]', '', text) words = text.split() # remove short or unhelpful words stop_short = set(['the', 'and', 'for', 'with', 'this', 'that', 'ai', 'library']) filtered = [w for w in words if len(w) > 3 and w not in stop_short] return '_'.join(filtered)[:200] def generate_filename(prompt, response, file_type="md", max_length=200): """ Generate a shortened filename by: 1) extracting high-info terms, 2) snippet from prompt+response, 3) remove duplicates, 4) truncate if needed. """ prefix = format_timestamp_prefix() + "_" combined_text = (prompt + " " + response)[:200] info_terms = get_high_info_terms(combined_text, top_n=5) snippet = (prompt[:40] + " " + response[:40]).strip() snippet_cleaned = clean_text_for_filename(snippet) # remove duplicates name_parts = info_terms + [snippet_cleaned] seen = set() unique_parts = [] for part in name_parts: if part not in seen: seen.add(part) unique_parts.append(part) full_name = '_'.join(unique_parts).strip('_') leftover_chars = max_length - len(prefix) - len(file_type) - 1 if len(full_name) > leftover_chars: full_name = full_name[:leftover_chars] return f"{prefix}{full_name}.{file_type}" def create_file(prompt, response, file_type="md"): """Create a text file from prompt + response with sanitized filename.""" filename = generate_filename(prompt.strip(), response.strip(), file_type) with open(filename, 'w', encoding='utf-8') as f: f.write(prompt + "\n\n" + response) return filename def get_download_link(file, file_type="zip"): """ Convert a file to base64 and return an HTML link for download. """ with open(file, "rb") as f: b64 = base64.b64encode(f.read()).decode() if file_type == "zip": return f'📂 Download {os.path.basename(file)}' elif file_type == "mp3": return f'🎵 Download {os.path.basename(file)}' elif file_type == "wav": return f'🔊 Download {os.path.basename(file)}' elif file_type == "md": return f'📝 Download {os.path.basename(file)}' else: return f'Download {os.path.basename(file)}' def clean_for_speech(text: str) -> str: """Clean up text for TTS output.""" text = text.replace("\n", " ") text = text.replace("", " ") text = text.replace("#", "") text = re.sub(r"\(https?:\/\/[^\)]+\)", "", text) text = re.sub(r"\s+", " ", text).strip() return text async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=0, file_format="mp3"): """Async TTS generation with edge-tts library.""" text = clean_for_speech(text) if not text.strip(): return None rate_str = f"{rate:+d}%" pitch_str = f"{pitch:+d}Hz" communicate = edge_tts.Communicate(text, voice, rate=rate_str, pitch=pitch_str) out_fn = generate_filename(text, text, file_type=file_format) await communicate.save(out_fn) return out_fn def speak_with_edge_tts(text, voice="en-US-AriaNeural", rate=0, pitch=0, file_format="mp3"): """Wrapper for the async TTS generate call.""" return asyncio.run(edge_tts_generate_audio(text, voice, rate, pitch, file_format)) def play_and_download_audio(file_path, file_type="mp3"): """Streamlit audio + a quick download link.""" if file_path and os.path.exists(file_path): st.audio(file_path) dl_link = get_download_link(file_path, file_type=file_type) st.markdown(dl_link, unsafe_allow_html=True) def save_qa_with_audio(question, answer, voice=None): """Save Q&A to markdown and also generate audio.""" if not voice: voice = st.session_state['tts_voice'] combined_text = f"# Question\n{question}\n\n# Answer\n{answer}" md_file = create_file(question, answer, "md") audio_text = f"{question}\n\nAnswer: {answer}" audio_file = speak_with_edge_tts( audio_text, voice=voice, file_format=st.session_state['audio_format'] ) return md_file, audio_file # ───────────────────────────────────────────────────────── # 3. PAPER PARSING & DISPLAY # ───────────────────────────────────────────────────────── def parse_arxiv_refs(ref_text: str): """ Given a multi-line markdown with arxiv references, parse them into a list of dicts: {date, title, url, authors, summary, ...}. """ if not ref_text: return [] results = [] current_paper = {} lines = ref_text.split('\n') for i, line in enumerate(lines): if line.count('|') == 2: # Found a new paper line if current_paper: results.append(current_paper) if len(results) >= 20: break try: header_parts = line.strip('* ').split('|') date = header_parts[0].strip() title = header_parts[1].strip() url_match = re.search(r'(https://arxiv.org/\S+)', line) url = url_match.group(1) if url_match else f"paper_{len(results)}" current_paper = { 'date': date, 'title': title, 'url': url, 'authors': '', 'summary': '', 'full_audio': None, 'download_base64': '', } except Exception as e: st.warning(f"Error parsing paper header: {str(e)}") current_paper = {} continue elif current_paper: # If authors not set, fill it; otherwise, fill summary if not current_paper['authors']: current_paper['authors'] = line.strip('* ') else: if current_paper['summary']: current_paper['summary'] += ' ' + line.strip() else: current_paper['summary'] = line.strip() if current_paper: results.append(current_paper) return results[:20] def create_paper_links_md(papers): """Creates a minimal .md content linking to each paper's arxiv URL.""" lines = ["# Paper Links\n"] for i, p in enumerate(papers, start=1): lines.append(f"{i}. **{p['title']}** — [Arxiv]({p['url']})") return "\n".join(lines) def create_paper_audio_files(papers, input_question): """ For each paper, generate TTS audio summary, store the path in `paper['full_audio']`, and also store a base64 link for stable downloading. """ for paper in papers: try: audio_text = f"{paper['title']} by {paper['authors']}. {paper['summary']}" audio_text = clean_for_speech(audio_text) file_format = st.session_state['audio_format'] audio_file = speak_with_edge_tts( audio_text, voice=st.session_state['tts_voice'], file_format=file_format ) paper['full_audio'] = audio_file if audio_file: with open(audio_file, "rb") as af: b64_data = base64.b64encode(af.read()).decode() download_filename = os.path.basename(audio_file) mime_type = "mpeg" if file_format == "mp3" else "wav" paper['download_base64'] = ( f'🎵 Download {download_filename}' ) except Exception as e: st.warning(f"Error processing paper {paper['title']}: {str(e)}") paper['full_audio'] = None paper['download_base64'] = '' def display_papers(papers, marquee_settings): """Display paper info in the main area with marquee + expanders + audio.""" st.write("## Research Papers") for i, paper in enumerate(papers, start=1): marquee_text = f"📄 {paper['title']} | 👤 {paper['authors'][:120]} | 📝 {paper['summary'][:200]}" display_marquee(marquee_text, marquee_settings, key_suffix=f"paper_{i}") with st.expander(f"{i}. 📄 {paper['title']}", expanded=True): st.markdown(f"**{paper['date']} | {paper['title']}** — [Arxiv Link]({paper['url']})") st.markdown(f"*Authors:* {paper['authors']}") st.markdown(paper['summary']) if paper.get('full_audio'): st.write("📚 Paper Audio") st.audio(paper['full_audio']) if paper['download_base64']: st.markdown(paper['download_base64'], unsafe_allow_html=True) def display_papers_in_sidebar(papers): """Mirrors the paper listing in the sidebar with expanders, audio, etc.""" st.sidebar.title("🎶 Papers & Audio") for i, paper in enumerate(papers, start=1): with st.sidebar.expander(f"{i}. {paper['title']}"): st.markdown(f"**Arxiv:** [Link]({paper['url']})") if paper['full_audio']: st.audio(paper['full_audio']) if paper['download_base64']: st.markdown(paper['download_base64'], unsafe_allow_html=True) st.markdown(f"**Authors:** {paper['authors']}") if paper['summary']: st.markdown(f"**Summary:** {paper['summary'][:300]}...") # ───────────────────────────────────────────────────────── # 4. ZIP & DELETE-ALL UTILS # ───────────────────────────────────────────────────────── def create_zip_of_all_files(): """ Zip up all recognized file types, limiting the final zip name to ~20 chars to avoid overly long base64 strings. """ # Patterns for .md, .pdf, .mp4, .mp3, .wav, .csv, .xlsx, .html, .py, .txt file_patterns = [ "*.md", "*.pdf", "*.mp4", "*.mp3", "*.wav", "*.csv", "*.xlsx", "*.html", "*.py", "*.txt" ] all_files = [] for pat in file_patterns: all_files.extend(glob.glob(pat)) all_files = list(set(all_files)) # unique if not all_files: return None # Combine content for naming all_content = [] for f in all_files: if f.endswith(".md"): with open(f, "r", encoding="utf-8") as fin: all_content.append(fin.read()) else: all_content.append(os.path.basename(f)) # Add last query if relevant if st.session_state['last_query']: all_content.append(st.session_state['last_query']) combined_content = " ".join(all_content) info_terms = get_high_info_terms(combined_content, top_n=10) timestamp = format_timestamp_prefix() name_text = '-'.join(term for term in info_terms[:5]) short_zip_name = (timestamp + "_" + name_text)[:20] + ".zip" with zipfile.ZipFile(short_zip_name, 'w') as z: for f in all_files: z.write(f) return short_zip_name def delete_all_files(): """Removes all recognized file types from the directory.""" file_patterns = [ "*.md", "*.pdf", "*.mp4", "*.mp3", "*.wav", "*.csv", "*.xlsx", "*.html", "*.py", "*.txt" ] for pat in file_patterns: for f in glob.glob(pat): os.remove(f) # ───────────────────────────────────────────────────────── # 5. MAIN LOGIC: AI LOOKUP & VOICE INPUT # ───────────────────────────────────────────────────────── def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True, full_audio=False): """Main routine that uses Anthropic (Claude) + Gradio ArXiv RAG pipeline.""" start = time.time() # --- 1) Claude API client = anthropic.Anthropic(api_key=anthropic_key) user_input = q response = client.messages.create( model="claude-3-sonnet-20240229", max_tokens=1000, messages=[ {"role": "user", "content": user_input} ]) st.write("Claude's reply 🧠:") st.markdown(response.content[0].text) # Save & produce audio result = response.content[0].text create_file(q, result) md_file, audio_file = save_qa_with_audio(q, result) st.subheader("📝 Main Response Audio") play_and_download_audio(audio_file, st.session_state['audio_format']) # --- 2) Arxiv RAG st.write("Arxiv's AI this Evening is Mixtral 8x7B...") client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern") refs = client.predict( q, 20, "Semantic Search", "mistralai/Mixtral-8x7B-Instruct-v0.1", api_name="/update_with_rag_md" )[0] r2 = client.predict( q, "mistralai/Mixtral-8x7B-Instruct-v0.1", True, api_name="/ask_llm" ) result = f"### 🔎 {q}\n\n{r2}\n\n{refs}" md_file, audio_file = save_qa_with_audio(q, result) st.subheader("📝 Main Response Audio") play_and_download_audio(audio_file, st.session_state['audio_format']) # --- 3) Parse + handle papers papers = parse_arxiv_refs(refs) if papers: # Create minimal links page first paper_links = create_paper_links_md(papers) links_file = create_file(q, paper_links, "md") st.markdown(paper_links) # Then create audio for each paper create_paper_audio_files(papers, input_question=q) display_papers(papers, get_marquee_settings()) display_papers_in_sidebar(papers) else: st.warning("No papers found in the response.") elapsed = time.time() - start st.write(f"**Total Elapsed:** {elapsed:.2f} s") return result def process_voice_input(text): """When user sends voice query, we run the AI lookup + Q&A with audio.""" if not text: return st.subheader("🔍 Search Results") result = perform_ai_lookup( text, vocal_summary=True, extended_refs=False, titles_summary=True, full_audio=True ) md_file, audio_file = save_qa_with_audio(text, result) st.subheader("📝 Generated Files") st.write(f"Markdown: {md_file}") st.write(f"Audio: {audio_file}") play_and_download_audio(audio_file, st.session_state['audio_format']) # ───────────────────────────────────────────────────────── # 6. FILE HISTORY SIDEBAR # ───────────────────────────────────────────────────────── def display_file_history_in_sidebar(): """ Shows a history of each recognized file in descending order of modification time, with quick icons and optional download links. """ st.sidebar.markdown("---") st.sidebar.markdown("### 📂 File History") # Patterns for .md, .mp3, .wav, .pdf, .mp4, .csv, .xlsx, .html, .py, .txt patterns = [ "*.md", "*.pdf", "*.mp4", "*.mp3", "*.wav", "*.csv", "*.xlsx", "*.html", "*.py", "*.txt" ] all_files = [] for p in patterns: all_files.extend(glob.glob(p)) all_files = list(set(all_files)) # unique if not all_files: st.sidebar.write("No files found.") return # Sort newest first all_files = sorted(all_files, key=os.path.getmtime, reverse=True) for f in all_files: fname = os.path.basename(f) ext = os.path.splitext(fname)[1].lower().strip('.') emoji = FILE_EMOJIS.get(ext, '📦') time_str = datetime.fromtimestamp(os.path.getmtime(f)).strftime("%Y-%m-%d %H:%M:%S") with st.sidebar.expander(f"{emoji} {fname}"): st.write(f"**Modified:** {time_str}") if ext == "md": with open(f, "r", encoding="utf-8") as file_in: snippet = file_in.read(200).replace("\n", " ") if len(snippet) == 200: snippet += "..." st.write(snippet) st.markdown(get_download_link(f, file_type="md"), unsafe_allow_html=True) elif ext in ["mp3","wav"]: st.audio(f) st.markdown(get_download_link(f, file_type=ext), unsafe_allow_html=True) else: st.markdown(get_download_link(f), unsafe_allow_html=True) # ───────────────────────────────────────────────────────── # 7. MAIN APP # ───────────────────────────────────────────────────────── def main(): """ Main Streamlit app. Now includes: 1) Voice & AutoRun at the top of the sidebar, 2) File Tools (Delete All / Zip All) in the sidebar, 3) A new '📤 Upload' tab, 4) Everything else from your original code snippet. """ # -- 1) Voice & AutoRun at top of sidebar -- st.sidebar.title("Global Settings") selected_voice = st.sidebar.selectbox( "TTS Voice", options=EDGE_TTS_VOICES, index=EDGE_TTS_VOICES.index(st.session_state['tts_voice']) ) # Autorun defaults to off (False) st.session_state.autorun = st.sidebar.checkbox("AutoRun", value=st.session_state.autorun) # Audio format audio_format = st.sidebar.radio("Audio Format", ["MP3","WAV"], index=0) if selected_voice != st.session_state['tts_voice']: st.session_state['tts_voice'] = selected_voice st.experimental_rerun() if audio_format.lower() != st.session_state['audio_format']: st.session_state['audio_format'] = audio_format.lower() st.experimental_rerun() # -- 2) File Tools: Delete All / Zip All st.sidebar.markdown("---") st.sidebar.markdown("### 🗃 File Tools") col_del, col_zip = st.sidebar.columns(2) with col_del: if st.button("🗑 Delete All"): delete_all_files() st.sidebar.success("All recognized files removed!") st.experimental_rerun() with col_zip: if st.button("📦 Zip All"): zip_name = create_zip_of_all_files() if zip_name: st.sidebar.markdown(get_download_link(zip_name, "zip"), unsafe_allow_html=True) # -- 3) Marquee Settings update_marquee_settings_ui() marquee_settings = get_marquee_settings() # -- 4) File History in sidebar display_file_history_in_sidebar() # -- 5) Display marquee display_marquee(st.session_state['marquee_content'], {**marquee_settings, "font-size": "28px", "lineHeight": "50px"}, key_suffix="welcome") # -- 6) Main action tabs tab_main = st.radio( "Action:", ["📤 Upload", "🎤 Voice", "📸 Media", "🔍 ArXiv", "📝 Editor"], horizontal=True ) # 6a) Upload Tab if tab_main == "📤 Upload": st.header("📤 Upload Files") accepted_types = [ # We'll accept basically everything (None in file_uploader), # but let's specify for clarity: "text/plain", "text/markdown", "audio/mpeg", "audio/wav", "image/png", "image/jpeg", "video/mp4", "application/pdf", "application/vnd.ms-excel", "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", "text/html", "application/octet-stream", ] uploaded = st.file_uploader("Select files to upload:", accept_multiple_files=True, type=None) if uploaded: for uf in uploaded: with open(uf.name, "wb") as outfile: outfile.write(uf.read()) st.success("Uploaded!") st.session_state.should_rerun = True # 6b) Voice Tab elif tab_main == "🎤 Voice": st.subheader("🎤 Voice Input") user_text = st.text_area("💬 Message:", height=100) user_text = user_text.strip().replace('\n', ' ') if st.button("📨 Send"): process_voice_input(user_text) st.subheader("📜 Chat History") for c in st.session_state.chat_history: st.write("**You:**", c["user"]) st.write("**Response:**", c["claude"]) # 6c) Media Tab elif tab_main == "📸 Media": st.header("📸 Media Gallery") tabs = st.tabs(["🎵 Audio", "🖼 Images", "🎥 Video"]) with tabs[0]: st.subheader("🎵 Audio Files") audio_files = glob.glob("*.mp3") + glob.glob("*.wav") if audio_files: for a in audio_files: with st.expander(os.path.basename(a)): st.audio(a) ext = os.path.splitext(a)[1].replace('.', '') dl_link = get_download_link(a, file_type=ext) st.markdown(dl_link, unsafe_allow_html=True) else: st.write("No audio files found.") with tabs[1]: st.subheader("🖼 Image Files") imgs = glob.glob("*.png") + glob.glob("*.jpg") + glob.glob("*.jpeg") if imgs: c = st.slider("Cols", 1, 5, 3, key="cols_images") cols = st.columns(c) for i, f in enumerate(imgs): with cols[i % c]: st.image(Image.open(f), use_container_width=True) else: st.write("No images found.") with tabs[2]: st.subheader("🎥 Video Files") vids = glob.glob("*.mp4") + glob.glob("*.mov") + glob.glob("*.avi") if vids: for v in vids: with st.expander(os.path.basename(v)): st.video(v) else: st.write("No videos found.") # 6d) ArXiv Tab elif tab_main == "🔍 ArXiv": st.subheader("🔍 Query ArXiv") q = st.text_input("🔍 Query:", key="arxiv_query") st.markdown("### 🎛 Options") st.write("(AutoRun is in the sidebar.)") extended_refs = st.checkbox("📜LongRefs", value=False, key="option_extended_refs") titles_summary = st.checkbox("🔖TitlesOnly", value=True, key="option_titles_summary") full_audio = st.checkbox("📚FullAudio", value=False, key="option_full_audio") full_transcript = st.checkbox("🧾FullTranscript", value=False, key="option_full_transcript") if q and st.button("🔍Run"): st.session_state.last_query = q result = perform_ai_lookup(q, extended_refs=extended_refs, titles_summary=titles_summary, full_audio=full_audio) if full_transcript: create_file(q, result, "md") # If AutoRun is ON and user typed something if st.session_state.autorun and q: st.session_state.last_query = q result = perform_ai_lookup(q, extended_refs=extended_refs, titles_summary=titles_summary, full_audio=full_audio) if full_transcript: create_file(q, result, "md") # 6e) Editor Tab elif tab_main == "📝 Editor": st.write("Select or create a file to edit. (Currently minimal demo)") # Rerun if needed if st.session_state.should_rerun: st.session_state.should_rerun = False st.experimental_rerun() if __name__ == "__main__": main()