awacke1 committed on
Commit 5a4281c • 1 Parent(s): 4565252

Update backup17.app.py

Files changed (1)
  1. backup17.app.py +177 -107
backup17.app.py CHANGED
@@ -5,7 +5,7 @@ import streamlit.components.v1 as components
  from datetime import datetime
  from audio_recorder_streamlit import audio_recorder
  from bs4 import BeautifulSoup
- from collections import deque
  from dotenv import load_dotenv
  from gradio_client import Client
  from huggingface_hub import InferenceClient
@@ -18,7 +18,7 @@ from openai import OpenAI
  import extra_streamlit_components as stx
  from streamlit.runtime.scriptrunner import get_script_run_ctx
  import asyncio
- import edge_tts # ensure this is installed (pip install edge-tts)

  # 🔧 Config & Setup
  st.set_page_config(
@@ -40,21 +40,46 @@ openai_client = OpenAI(api_key=openai.api_key, organization=os.getenv('OPENAI_OR
  HF_KEY = os.getenv('HF_KEY')
  API_URL = os.getenv('API_URL')

- st.session_state.setdefault('transcript_history', [])
- st.session_state.setdefault('chat_history', [])
- st.session_state.setdefault('openai_model', "gpt-4o-2024-05-13")
- st.session_state.setdefault('messages', [])
- st.session_state.setdefault('last_voice_input', "")

  # 🎨 Minimal Custom CSS
  st.markdown("""
  <style>
  .main { background: linear-gradient(to right, #1a1a1a, #2d2d2d); color: #fff; }
  .stMarkdown { font-family: 'Helvetica Neue', sans-serif; }
  </style>
  """, unsafe_allow_html=True)

- # 🔑 Common Utilities
  def generate_filename(prompt, file_type="md"):
  ctz = pytz.timezone('US/Central')
  date_str = datetime.now(ctz).strftime("%m%d_%H%M")
@@ -63,13 +88,14 @@ def generate_filename(prompt, file_type="md"):
  return f"{date_str}_{safe}.{file_type}"

  def create_file(filename, prompt, response):
  with open(filename, 'w', encoding='utf-8') as f:
  f.write(prompt + "\n\n" + response)

  def get_download_link(file):
  with open(file, "rb") as f:
  b64 = base64.b64encode(f.read()).decode()
- return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file)}">📂 Download {os.path.basename(file)}</a>'

  @st.cache_resource
  def speech_synthesis_html(result):
@@ -83,9 +109,8 @@ def speech_synthesis_html(result):
  """
  components.html(html_code, height=0)

- #------------add EdgeTTS
- # --- NEW FUNCTIONS FOR EDGE TTS ---
  async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=0):
  if not text.strip():
  return None
  rate_str = f"{rate:+d}%"
@@ -101,8 +126,8 @@ def speak_with_edge_tts(text, voice="en-US-AriaNeural", rate=0, pitch=0):
  def play_and_download_audio(file_path):
  if file_path and os.path.exists(file_path):
  st.audio(file_path)
- st.markdown(get_download_link(file_path), unsafe_allow_html=True)
- #---------------------------

  def process_image(image_path, user_prompt):
  with open(image_path, "rb") as imgf:
@@ -125,6 +150,7 @@ def process_audio(audio_path):
  with open(audio_path, "rb") as f:
  transcription = openai_client.audio.transcriptions.create(model="whisper-1", file=f)
  st.session_state.messages.append({"role": "user", "content": transcription.text})
  return transcription.text

  def process_video(video_path, seconds_per_frame=1):
@@ -177,26 +203,18 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary

  st.markdown(result)

- # Main Vocal Summary (Short Answer)
  if vocal_summary:
- start_main_part = time.time()
  audio_file_main = speak_with_edge_tts(r2, voice="en-US-AriaNeural", rate=0, pitch=0)
  st.write("### 🎙️ Vocal Summary (Short Answer)")
  play_and_download_audio(audio_file_main)
- st.write(f"**Elapsed (Short Answer):** {time.time() - start_main_part:.2f} s")

- # Extended References & Summaries (optional)
  if extended_refs:
- start_refs_part = time.time()
  summaries_text = "Here are the summaries from the references: " + refs.replace('"','')
  audio_file_refs = speak_with_edge_tts(summaries_text, voice="en-US-AriaNeural", rate=0, pitch=0)
  st.write("### 📜 Extended References & Summaries")
  play_and_download_audio(audio_file_refs)
- st.write(f"**Elapsed (Extended References):** {time.time() - start_refs_part:.2f} s")

- # Paper Titles Only (short)
  if titles_summary:
- start_titles_part = time.time()
  titles = []
  for line in refs.split('\n'):
  m = re.search(r"\[([^\]]+)\]", line)
@@ -207,7 +225,6 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary
  audio_file_titles = speak_with_edge_tts(titles_text, voice="en-US-AriaNeural", rate=0, pitch=0)
  st.write("### 🔖 Paper Titles")
  play_and_download_audio(audio_file_titles)
- st.write(f"**Elapsed (Titles):** {time.time() - start_titles_part:.2f} s")

  elapsed = time.time()-start
  st.write(f"**Total Elapsed:** {elapsed:.2f} s")
@@ -230,7 +247,7 @@ def process_with_gpt(text):
  st.write("GPT-4o: " + ans)
  create_file(generate_filename(text,"md"),text,ans)
  st.session_state.messages.append({"role":"assistant","content":ans})
- return ans

  def process_with_claude(text):
  if not text: return
@@ -246,13 +263,23 @@ def process_with_claude(text):
  st.write("Claude: " + ans)
  create_file(generate_filename(text,"md"),text,ans)
  st.session_state.chat_history.append({"user":text,"claude":ans})
- return ans

- def create_zip_of_files():
- md_files = glob.glob("*.md")
- mp3_files = glob.glob("*.mp3")
  all_files = md_files + mp3_files
- zip_name = "all_files.zip"
  with zipfile.ZipFile(zip_name,'w') as z:
  for f in all_files:
  z.write(f)
@@ -265,87 +292,107 @@ def get_media_html(p,typ="video",w="100%"):
  else:
  return f'<audio controls style="width:{w};"><source src="data:audio/mpeg;base64,{d}" type="audio/mpeg"></audio>'

- def display_file_manager():
- st.sidebar.title("🎵 Audio Files & Documents")
- st.sidebar.markdown("Here you can find all recorded `.mp3` files and `.md` notes.")
-
- # Display .mp3 files in the sidebar
- mp3_files = sorted(glob.glob("*.mp3"), reverse=True)
- if mp3_files:
- st.sidebar.subheader("MP3 Files:")
- if st.sidebar.button("🗑 Delete All MP3"):
- for a in mp3_files:
- os.remove(a)
- st.experimental_rerun()
- for a in mp3_files:
- exp = st.sidebar.expander(os.path.basename(a))
- with exp:
- # Show audio player
- st.markdown(get_media_html(a,"audio"),unsafe_allow_html=True)
-
- # Actions row
- c1, c2, c3 = st.columns([3,3,1])
- with c1:
- # Download link for the MP3 file
- st.markdown(get_download_link(a), unsafe_allow_html=True)
- with c2:
- # Button to transcribe this file
- if st.button(f"Transcribe {os.path.basename(a)}", key="transcribe_"+a):
- t = process_audio(a)
- st.write("Transcription:")
- st.write(t)
- with c3:
- # Delete button for mp3
- if st.button("🗑", key="del_mp3_"+a):
- os.remove(a)
- st.experimental_rerun()
- else:
- st.sidebar.write("No MP3 files found.")
-
- # Display .md files in the sidebar
- st.sidebar.subheader("MD Files:")
- files = sorted(glob.glob("*.md"), reverse=True)
- if st.sidebar.button("🗑 Delete All MD"):
- for f in files:
- os.remove(f)
- st.experimental_rerun()
-
- if files:
- for f in files:
- col1, col2, col3, col4 = st.sidebar.columns([1,3,1,1])
- with col1:
- if st.button("🌐", key="v"+f):
- st.session_state.current_file = f
- c = open(f,'r',encoding='utf-8').read()
- st.write("**Viewing file content:**")
- st.write(c)
- with col2:
- st.markdown(get_download_link(f),unsafe_allow_html=True)
- with col3:
- if st.button("📂", key="e"+f):
- st.session_state.current_file = f
- st.session_state.file_content = open(f,'r',encoding='utf-8').read()
- with col4:
- if st.button("🗑", key="d"+f):
- os.remove(f)
- st.experimental_rerun()
- else:
- st.sidebar.write("No MD files found.")
-
- # Download all as zip (including .mp3 and .md)
- if len(files) > 0 or len(mp3_files) > 0:
- if st.sidebar.button("⬇️ Download All (.md and .mp3)"):
- z = create_zip_of_files()
- st.sidebar.markdown(get_download_link(z),unsafe_allow_html=True)

  def main():
  st.sidebar.markdown("### 🚲BikeAI🏆 Multi-Agent Research AI")
  tab_main = st.radio("Action:",["🎤 Voice Input","📸 Media Gallery","🔍 Search ArXiv","📝 File Editor"],horizontal=True)
-
  model_choice = st.sidebar.radio("AI Model:", ["Arxiv","GPT-4o","Claude-3","GPT+Claude+Arxiv"], index=0)

- # Declare the component
  mycomponent = components.declare_component("mycomponent", path="mycomponent")
  val = mycomponent(my_input_value="Hello")
  if val:
@@ -388,8 +435,7 @@ def main():
  if q:
  q = q.strip()
  if q and st.button("Run ArXiv Query"):
- r = perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs, titles_summary=titles_summary)
- st.markdown(r)

  elif tab_main == "🎤 Voice Input":
  st.subheader("🎤 Voice Recognition")
@@ -438,7 +484,7 @@ def main():
  for i,f in enumerate(imgs):
  with cols[i%c]:
  st.image(Image.open(f),use_container_width=True)
- if st.button(f"👀 Analyze {os.path.basename(f)}"):
  a = process_image(f,"Describe this image.")
  st.markdown(a)
  else:
@@ -449,7 +495,7 @@ def main():
  for v in vids:
  with st.expander(f"🎥 {os.path.basename(v)}"):
  st.markdown(get_media_html(v,"video"),unsafe_allow_html=True)
- if st.button(f"Analyze {os.path.basename(v)}"):
  a = process_video_with_gpt(v,"Describe video.")
  st.markdown(a)
  else:
@@ -463,10 +509,34 @@ def main():
  with open(st.session_state.current_file,'w',encoding='utf-8') as f:
  f.write(new_text)
  st.success("Updated!")
  else:
  st.write("Select a file from the sidebar to edit.")

- display_file_manager()

  if __name__=="__main__":
  main()
  from datetime import datetime
  from audio_recorder_streamlit import audio_recorder
  from bs4 import BeautifulSoup
+ from collections import defaultdict, deque
  from dotenv import load_dotenv
  from gradio_client import Client
  from huggingface_hub import InferenceClient

  import extra_streamlit_components as stx
  from streamlit.runtime.scriptrunner import get_script_run_ctx
  import asyncio
+ import edge_tts

  # 🔧 Config & Setup
  st.set_page_config(

  HF_KEY = os.getenv('HF_KEY')
  API_URL = os.getenv('API_URL')

+ # Session states
+ if 'transcript_history' not in st.session_state:
+ st.session_state['transcript_history'] = []
+ if 'chat_history' not in st.session_state:
+ st.session_state['chat_history'] = []
+ if 'openai_model' not in st.session_state:
+ st.session_state['openai_model'] = "gpt-4o-2024-05-13"
+ if 'messages' not in st.session_state:
+ st.session_state['messages'] = []
+ if 'last_voice_input' not in st.session_state:
+ st.session_state['last_voice_input'] = ""
+ if 'editing_file' not in st.session_state:
+ st.session_state['editing_file'] = None
+ if 'edit_new_name' not in st.session_state:
+ st.session_state['edit_new_name'] = ""
+ if 'edit_new_content' not in st.session_state:
+ st.session_state['edit_new_content'] = ""
+ if 'viewing_file' not in st.session_state:
+ st.session_state['viewing_file'] = None
+ if 'viewing_file_type' not in st.session_state:
+ st.session_state['viewing_file_type'] = None
+ if 'should_rerun' not in st.session_state:
+ st.session_state['should_rerun'] = False

  # 🎨 Minimal Custom CSS
  st.markdown("""
  <style>
  .main { background: linear-gradient(to right, #1a1a1a, #2d2d2d); color: #fff; }
  .stMarkdown { font-family: 'Helvetica Neue', sans-serif; }
+ .stButton>button {
+ margin-right: 0.5rem;
+ }
  </style>
  """, unsafe_allow_html=True)

+ FILE_EMOJIS = {
+ "md": "📝",
+ "mp3": "🎵",
+ }
+
  def generate_filename(prompt, file_type="md"):
  ctz = pytz.timezone('US/Central')
  date_str = datetime.now(ctz).strftime("%m%d_%H%M")

  return f"{date_str}_{safe}.{file_type}"

  def create_file(filename, prompt, response):
+ # Creating file does not trigger immediate rerun
  with open(filename, 'w', encoding='utf-8') as f:
  f.write(prompt + "\n\n" + response)

  def get_download_link(file):
  with open(file, "rb") as f:
  b64 = base64.b64encode(f.read()).decode()
+ return f'<a href="data:file/zip;base64,{b64}" download="{os.path.basename(file)}">📂 Download {os.path.basename(file)}</a>'

  @st.cache_resource
  def speech_synthesis_html(result):

  """
  components.html(html_code, height=0)

  async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=0):
+ # Just create mp3 file, no immediate rerun
  if not text.strip():
  return None
  rate_str = f"{rate:+d}%"

  def play_and_download_audio(file_path):
  if file_path and os.path.exists(file_path):
  st.audio(file_path)
+ dl_link = f'<a href="data:audio/mpeg;base64,{base64.b64encode(open(file_path,"rb").read()).decode()}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}</a>'
+ st.markdown(dl_link, unsafe_allow_html=True)

  def process_image(image_path, user_prompt):
  with open(image_path, "rb") as imgf:

  with open(audio_path, "rb") as f:
  transcription = openai_client.audio.transcriptions.create(model="whisper-1", file=f)
  st.session_state.messages.append({"role": "user", "content": transcription.text})
+ # No immediate rerun
  return transcription.text

  def process_video(video_path, seconds_per_frame=1):


  st.markdown(result)

  if vocal_summary:
  audio_file_main = speak_with_edge_tts(r2, voice="en-US-AriaNeural", rate=0, pitch=0)
  st.write("### 🎙️ Vocal Summary (Short Answer)")
  play_and_download_audio(audio_file_main)

  if extended_refs:
  summaries_text = "Here are the summaries from the references: " + refs.replace('"','')
  audio_file_refs = speak_with_edge_tts(summaries_text, voice="en-US-AriaNeural", rate=0, pitch=0)
  st.write("### 📜 Extended References & Summaries")
  play_and_download_audio(audio_file_refs)

  if titles_summary:
  titles = []
  for line in refs.split('\n'):
  m = re.search(r"\[([^\]]+)\]", line)

  audio_file_titles = speak_with_edge_tts(titles_text, voice="en-US-AriaNeural", rate=0, pitch=0)
  st.write("### 🔖 Paper Titles")
  play_and_download_audio(audio_file_titles)

  elapsed = time.time()-start
  st.write(f"**Total Elapsed:** {elapsed:.2f} s")

  st.write("GPT-4o: " + ans)
  create_file(generate_filename(text,"md"),text,ans)
  st.session_state.messages.append({"role":"assistant","content":ans})
+ return ans

  def process_with_claude(text):
  if not text: return

  st.write("Claude: " + ans)
  create_file(generate_filename(text,"md"),text,ans)
  st.session_state.chat_history.append({"user":text,"claude":ans})
+ return ans
+
+ def create_zip_of_files(md_files, mp3_files):
+ # Exclude README.md if present
+ md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']

  all_files = md_files + mp3_files
+ if not all_files:
+ return None
+ # Build a descriptive name from file stems
+ stems = [os.path.splitext(os.path.basename(f))[0] for f in all_files]
+ # Join them
+ joined = "_".join(stems)
+ # Truncate if too long
+ if len(joined) > 50:
+ joined = joined[:50] + "_etc"
+ zip_name = f"{joined}.zip"
  with zipfile.ZipFile(zip_name,'w') as z:
  for f in all_files:
  z.write(f)

  else:
  return f'<audio controls style="width:{w};"><source src="data:audio/mpeg;base64,{d}" type="audio/mpeg"></audio>'

+ def load_files_for_sidebar():
+ # Gather all md and mp3 files
+ md_files = glob.glob("*.md")
+ mp3_files = glob.glob("*.mp3")

+ # Exclude README.md from listings
+ md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
+
+ files_by_ext = defaultdict(list)
+ if md_files: files_by_ext['md'].extend(md_files)
+ if mp3_files: files_by_ext['mp3'].extend(mp3_files)
+
+ # Sort each extension group by modification time descending
+ for ext in files_by_ext:
+ files_by_ext[ext].sort(key=lambda x: os.path.getmtime(x), reverse=True)
+ return files_by_ext
+
+ def display_file_manager_sidebar(files_by_ext):
+ st.sidebar.title("🎵 Audio & Document Manager")
+
+ md_files = files_by_ext.get('md', [])
+ mp3_files = files_by_ext.get('mp3', [])
+
+ # Buttons to delete all except README.md (already excluded)
+ col_del = st.sidebar.columns(3)
+ with col_del[0]:
+ if st.button("🗑 Del All MD"):
+ for f in md_files:
+ os.remove(f)
+ st.session_state.should_rerun = True
+ with col_del[1]:
+ if st.button("🗑 Del All MP3"):
+ for f in mp3_files:
+ os.remove(f)
+ st.session_state.should_rerun = True
+ with col_del[2]:
+ if st.button("⬇️ Zip All"):
+ # create a zip of all md and mp3 except README.md
+ z = create_zip_of_files(md_files, mp3_files)
+ if z:
+ st.sidebar.markdown(get_download_link(z),unsafe_allow_html=True)
+
+ ext_counts = {ext: len(files) for ext, files in files_by_ext.items()}
+ sorted_ext = sorted(files_by_ext.keys(), key=lambda x: ext_counts[x], reverse=True)
+
+ # Display files with actions
+ for ext in sorted_ext:
+ emoji = FILE_EMOJIS.get(ext, "📦")
+ count = len(files_by_ext[ext])
+ with st.sidebar.expander(f"{emoji} {ext.upper()} Files ({count})", expanded=True):
+ for f in files_by_ext[ext]:
+ fname = os.path.basename(f)
+ ctime = datetime.fromtimestamp(os.path.getmtime(f)).strftime("%Y-%m-%d %H:%M:%S")
+ # Show filename and buttons in a row
+ st.write(f"**{fname}** - {ctime}")
+ file_buttons_col = st.columns([1,1,1])
+ with file_buttons_col[0]:
+ if st.button("👀View", key="view_"+f):
+ st.session_state.viewing_file = f
+ st.session_state.viewing_file_type = ext
+ # No rerun needed, just set state
+ with file_buttons_col[1]:
+ if ext == "md":
+ if st.button("✏️Edit", key="edit_"+f):
+ st.session_state.editing_file = f
+ st.session_state.edit_new_name = fname.replace(".md","")
+ st.session_state.edit_new_content = open(f,'r',encoding='utf-8').read()
+ st.session_state.should_rerun = True
+ with file_buttons_col[2]:
+ if st.button("🗑Del", key="del_"+f):
+ os.remove(f)
+ st.session_state.should_rerun = True
+
+ # If editing an md file
+ if st.session_state.editing_file and os.path.exists(st.session_state.editing_file):
+ st.sidebar.subheader(f"Editing: {os.path.basename(st.session_state.editing_file)}")
+ st.session_state.edit_new_name = st.sidebar.text_input("New name (no extension):", value=st.session_state.edit_new_name)
+ st.session_state.edit_new_content = st.sidebar.text_area("Content:", st.session_state.edit_new_content, height=200)
+ c1,c2 = st.sidebar.columns(2)
+ with c1:
+ if st.button("Save"):
+ old_path = st.session_state.editing_file
+ new_path = st.session_state.edit_new_name + ".md"
+ if new_path != os.path.basename(old_path):
+ os.rename(old_path, new_path)
+ with open(new_path,'w',encoding='utf-8') as f:
+ f.write(st.session_state.edit_new_content)
+ st.session_state.editing_file = None
+ st.session_state.should_rerun = True
+ with c2:
+ if st.button("Cancel"):
+ st.session_state.editing_file = None
+ st.session_state.should_rerun = True

  def main():
  st.sidebar.markdown("### 🚲BikeAI🏆 Multi-Agent Research AI")
  tab_main = st.radio("Action:",["🎤 Voice Input","📸 Media Gallery","🔍 Search ArXiv","📝 File Editor"],horizontal=True)
+
  model_choice = st.sidebar.radio("AI Model:", ["Arxiv","GPT-4o","Claude-3","GPT+Claude+Arxiv"], index=0)

+ # Main Input Component
  mycomponent = components.declare_component("mycomponent", path="mycomponent")
  val = mycomponent(my_input_value="Hello")
  if val:

  if q:
  q = q.strip()
  if q and st.button("Run ArXiv Query"):
+ perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs, titles_summary=titles_summary)

  elif tab_main == "🎤 Voice Input":
  st.subheader("🎤 Voice Recognition")

  for i,f in enumerate(imgs):
  with cols[i%c]:
  st.image(Image.open(f),use_container_width=True)
+ if st.button(f"👀 Analyze {os.path.basename(f)}", key=f"analyze_{f}"):
  a = process_image(f,"Describe this image.")
  st.markdown(a)
  else:

  for v in vids:
  with st.expander(f"🎥 {os.path.basename(v)}"):
  st.markdown(get_media_html(v,"video"),unsafe_allow_html=True)
+ if st.button(f"Analyze {os.path.basename(v)}", key=f"analyze_{v}"):
  a = process_video_with_gpt(v,"Describe video.")
  st.markdown(a)
  else:

  with open(st.session_state.current_file,'w',encoding='utf-8') as f:
  f.write(new_text)
  st.success("Updated!")
+ st.session_state.should_rerun = True
  else:
  st.write("Select a file from the sidebar to edit.")

+ # After main content, load files and display in sidebar
+ files_by_ext = load_files_for_sidebar()
+ display_file_manager_sidebar(files_by_ext)
+
+ # If viewing a file, show its content below (in the main area)
+ if st.session_state.viewing_file and os.path.exists(st.session_state.viewing_file):
+ st.write("---")
+ st.write(f"**Viewing File:** {os.path.basename(st.session_state.viewing_file)}")
+ if st.session_state.viewing_file_type == "md":
+ # show markdown
+ content = open(st.session_state.viewing_file,'r',encoding='utf-8').read()
+ st.markdown(content)
+ elif st.session_state.viewing_file_type == "mp3":
+ # show audio
+ st.audio(st.session_state.viewing_file)
+ # Optionally add a "Close View" button
+ if st.button("Close View"):
+ st.session_state.viewing_file = None
+ st.session_state.viewing_file_type = None
+
+ # If user-triggered changes happened, rerun once at the end
+ if st.session_state.should_rerun:
+ st.session_state.should_rerun = False
+ st.rerun()

  if __name__=="__main__":
  main()
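
The new code above replaces the old inline st.experimental_rerun() calls with a should_rerun flag in st.session_state that is checked once at the end of main(). A minimal, self-contained sketch of that deferred-rerun pattern (the counter app below is hypothetical and not part of this commit; it assumes a Streamlit version that provides st.rerun):

import streamlit as st

# Initialize state once per session.
if 'should_rerun' not in st.session_state:
    st.session_state['should_rerun'] = False
if 'counter' not in st.session_state:
    st.session_state['counter'] = 0

def sidebar_actions():
    # Actions only mutate state and request a rerun; they never rerun inline.
    if st.sidebar.button("Increment"):
        st.session_state.counter += 1
        st.session_state.should_rerun = True
    if st.sidebar.button("Reset"):
        st.session_state.counter = 0
        st.session_state.should_rerun = True

def main():
    st.write(f"Counter: {st.session_state.counter}")
    sidebar_actions()
    # Rerun once, after the whole script has drawn its widgets.
    if st.session_state.should_rerun:
        st.session_state.should_rerun = False
        st.rerun()

if __name__ == "__main__":
    main()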