awacke1 committed
Commit 8010524 · 1 Parent(s): 58c05f2

Create backup18-GroupFileMgmt.app.py

Files changed (1)
  1. backup18-GroupFileMgmt.app.py +570 -0
backup18-GroupFileMgmt.app.py ADDED
@@ -0,0 +1,570 @@
import streamlit as st
import anthropic, openai, base64, cv2, glob, json, math, os, pytz, random, re, requests, textract, time, zipfile
import plotly.graph_objects as go
import streamlit.components.v1 as components
from datetime import datetime
from audio_recorder_streamlit import audio_recorder
from bs4 import BeautifulSoup
from collections import defaultdict, deque
from dotenv import load_dotenv
from gradio_client import Client
from huggingface_hub import InferenceClient
from io import BytesIO
from PIL import Image
from PyPDF2 import PdfReader
from urllib.parse import quote
from xml.etree import ElementTree as ET
from openai import OpenAI
import extra_streamlit_components as stx
from streamlit.runtime.scriptrunner import get_script_run_ctx
import asyncio
import edge_tts

# 🔧 Config & Setup
st.set_page_config(
    page_title="🚲BikeAI🏆 Claude/GPT Research",
    page_icon="🚲🏆",
    layout="wide",
    initial_sidebar_state="auto",
    menu_items={
        'Get Help': 'https://huggingface.co/awacke1',
        'Report a bug': 'https://huggingface.co/spaces/awacke1',
        'About': "🚲BikeAI🏆 Claude/GPT Research AI"
    }
)
load_dotenv()

openai_api_key = os.getenv('OPENAI_API_KEY', "")
anthropic_key = os.getenv('ANTHROPIC_API_KEY_3', "")
if 'OPENAI_API_KEY' in st.secrets:
    openai_api_key = st.secrets['OPENAI_API_KEY']
if 'ANTHROPIC_API_KEY' in st.secrets:
    anthropic_key = st.secrets["ANTHROPIC_API_KEY"]

openai.api_key = openai_api_key
claude_client = anthropic.Anthropic(api_key=anthropic_key)
openai_client = OpenAI(api_key=openai.api_key, organization=os.getenv('OPENAI_ORG_ID'))
HF_KEY = os.getenv('HF_KEY')
API_URL = os.getenv('API_URL')

if 'transcript_history' not in st.session_state:
    st.session_state['transcript_history'] = []
if 'chat_history' not in st.session_state:
    st.session_state['chat_history'] = []
if 'openai_model' not in st.session_state:
    st.session_state['openai_model'] = "gpt-4o-2024-05-13"
if 'messages' not in st.session_state:
    st.session_state['messages'] = []
if 'last_voice_input' not in st.session_state:
    st.session_state['last_voice_input'] = ""
if 'editing_file' not in st.session_state:
    st.session_state['editing_file'] = None
if 'edit_new_name' not in st.session_state:
    st.session_state['edit_new_name'] = ""
if 'edit_new_content' not in st.session_state:
    st.session_state['edit_new_content'] = ""
if 'viewing_prefix' not in st.session_state:
    st.session_state['viewing_prefix'] = None
if 'should_rerun' not in st.session_state:
    st.session_state['should_rerun'] = False

# 🎨 Minimal Custom CSS
st.markdown("""
<style>
.main { background: linear-gradient(to right, #1a1a1a, #2d2d2d); color: #fff; }
.stMarkdown { font-family: 'Helvetica Neue', sans-serif; }
.stButton>button {
    margin-right: 0.5rem;
}
</style>
""", unsafe_allow_html=True)

FILE_EMOJIS = {
    "md": "📝",
    "mp3": "🎵",
}

def clean_for_speech(text: str) -> str:
    text = text.replace("\n", " ")
    text = text.replace("</s>", " ")
    text = text.replace("#", "")
    # Remove links like (https://...)
    text = re.sub(r"\(https?:\/\/[^\)]+\)", "", text)
    text = re.sub(r"\s+", " ", text).strip()
    return text

def generate_filename(content, file_type="md"):
    # Prefix: %y%m_%H%M plus a trailing underscore -> 10 chars total, e.g. '2309_1245_'.
    # This is the same 10-character prefix the sidebar later uses to group related files.
    prefix = datetime.now().strftime("%y%m_%H%M") + "_"
    # Take the first 3 words of the content for the filename segment
    words = re.findall(r"\w+", content)
    name_text = '_'.join(words[:3]) if words else 'file'
    filename = f"{prefix}{name_text}.{file_type}"
    return filename

def create_file(prompt, response, file_type="md"):
    # Decide which content to base the filename on (prefer response)
    base_content = response.strip() if response.strip() else prompt.strip()
    filename = generate_filename(base_content, file_type)
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(prompt + "\n\n" + response)
    return filename
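# Illustrative filename example (timestamp is hypothetical):
#   generate_filename("Quantum computing advances in 2024", "md")
#   -> "2311_0915_Quantum_computing_advances.md"
# The shared "2311_0915_" prefix is what the sidebar file manager uses to group
# related outputs together.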

def get_download_link(file):
    with open(file, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    return f'<a href="data:file/zip;base64,{b64}" download="{os.path.basename(file)}">📂 Download {os.path.basename(file)}</a>'

@st.cache_resource
def speech_synthesis_html(result):
    html_code = f"""
    <html><body>
    <script>
    var msg = new SpeechSynthesisUtterance("{result.replace('"', '')}");
    window.speechSynthesis.speak(msg);
    </script>
    </body></html>
    """
    components.html(html_code, height=0)

async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=0):
    text = clean_for_speech(text)
    if not text.strip():
        return None
    rate_str = f"{rate:+d}%"
    pitch_str = f"{pitch:+d}Hz"
    communicate = edge_tts.Communicate(text, voice, rate=rate_str, pitch=pitch_str)
    out_fn = generate_filename(text,"mp3")
    await communicate.save(out_fn)
    return out_fn

def speak_with_edge_tts(text, voice="en-US-AriaNeural", rate=0, pitch=0):
    return asyncio.run(edge_tts_generate_audio(text, voice, rate, pitch))

def play_and_download_audio(file_path):
    if file_path and os.path.exists(file_path):
        st.audio(file_path)
        dl_link = f'<a href="data:audio/mpeg;base64,{base64.b64encode(open(file_path,"rb").read()).decode()}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}</a>'
        st.markdown(dl_link, unsafe_allow_html=True)
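# Usage note (illustrative): speak_with_edge_tts("Hello world", rate=10, pitch=-5)
# passes rate="+10%" and pitch="-5Hz" to edge_tts.Communicate and saves an .mp3
# named from the first words of the cleaned text, e.g. "2311_0915_Hello_world.mp3"
# (the timestamp prefix will differ at run time).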

def process_image(image_path, user_prompt):
    with open(image_path, "rb") as imgf:
        image_data = imgf.read()
    b64img = base64.b64encode(image_data).decode("utf-8")
    resp = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": [
                {"type": "text", "text": user_prompt},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64img}"}}
            ]}
        ],
        temperature=0.0,
    )
    return resp.choices[0].message.content

def process_audio(audio_path):
    with open(audio_path, "rb") as f:
        transcription = openai_client.audio.transcriptions.create(model="whisper-1", file=f)
    st.session_state.messages.append({"role": "user", "content": transcription.text})
    return transcription.text

def process_video(video_path, seconds_per_frame=1):
    vid = cv2.VideoCapture(video_path)
    total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = vid.get(cv2.CAP_PROP_FPS)
    skip = max(int(fps*seconds_per_frame), 1)  # guard against missing/zero FPS metadata
    frames_b64 = []
    for i in range(0, total, skip):
        vid.set(cv2.CAP_PROP_POS_FRAMES, i)
        ret, frame = vid.read()
        if not ret: break
        _, buf = cv2.imencode(".jpg", frame)
        frames_b64.append(base64.b64encode(buf).decode("utf-8"))
    vid.release()
    return frames_b64

def process_video_with_gpt(video_path, prompt):
    frames = process_video(video_path)
    resp = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role":"system","content":"Analyze video frames."},
            {"role":"user","content":[
                {"type":"text","text":prompt},
                *[{"type":"image_url","image_url":{"url":f"data:image/jpeg;base64,{fr}"}} for fr in frames]
            ]}
        ]
    )
    return resp.choices[0].message.content

def search_arxiv(query):
    st.write("🔍 Searching ArXiv...")
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    r1 = client.predict(prompt=query, llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1", stream_outputs=True, api_name="/ask_llm")
    st.markdown("### Mixtral-8x7B-Instruct-v0.1 Result")
    st.markdown(r1)
    r2 = client.predict(prompt=query, llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2", stream_outputs=True, api_name="/ask_llm")
    st.markdown("### Mistral-7B-Instruct-v0.2 Result")
    st.markdown(r2)
    return f"{r1}\n\n{r2}"

def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True):
    start = time.time()
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    r = client.predict(q,20,"Semantic Search","mistralai/Mixtral-8x7B-Instruct-v0.1",api_name="/update_with_rag_md")
    refs = r[0]
    r2 = client.predict(q,"mistralai/Mixtral-8x7B-Instruct-v0.1",True,api_name="/ask_llm")
    result = f"### 🔎 {q}\n\n{r2}\n\n{refs}"

    st.markdown(result)

    # Clean for speech before TTS
    if vocal_summary:
        main_text = clean_for_speech(r2)
        audio_file_main = speak_with_edge_tts(main_text)
        st.write("### 🎙️ Vocal Summary (Short Answer)")
        play_and_download_audio(audio_file_main)

    if extended_refs:
        summaries_text = "Here are the summaries from the references: " + refs.replace('"','')
        summaries_text = clean_for_speech(summaries_text)
        audio_file_refs = speak_with_edge_tts(summaries_text)
        st.write("### 📜 Extended References & Summaries")
        play_and_download_audio(audio_file_refs)

    if titles_summary:
        titles = []
        for line in refs.split('\n'):
            m = re.search(r"\[([^\]]+)\]", line)
            if m:
                titles.append(m.group(1))
        if titles:
            titles_text = "Here are the titles of the papers: " + ", ".join(titles)
            titles_text = clean_for_speech(titles_text)
            audio_file_titles = speak_with_edge_tts(titles_text)
            st.write("### 🔖 Paper Titles")
            play_and_download_audio(audio_file_titles)

    elapsed = time.time()-start
    st.write(f"**Total Elapsed:** {elapsed:.2f} s")
    # Create MD file from q and result
    create_file(q, result, "md")
    return result
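# A single lookup typically produces one .md transcript plus up to three .mp3
# narrations; because they are created within the same minute they usually share
# the same 10-character timestamp prefix and therefore show up as one group in
# the sidebar file manager.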

def process_with_gpt(text):
    if not text: return
    st.session_state.messages.append({"role":"user","content":text})
    with st.chat_message("user"):
        st.markdown(text)
    with st.chat_message("assistant"):
        c = openai_client.chat.completions.create(
            model=st.session_state["openai_model"],
            messages=st.session_state.messages,
            stream=False
        )
        ans = c.choices[0].message.content
        st.write("GPT-4o: " + ans)
        create_file(text, ans, "md")
        st.session_state.messages.append({"role":"assistant","content":ans})
    return ans

def process_with_claude(text):
    if not text: return
    with st.chat_message("user"):
        st.markdown(text)
    with st.chat_message("assistant"):
        r = claude_client.messages.create(
            model="claude-3-sonnet-20240229",
            max_tokens=1000,
            messages=[{"role":"user","content":text}]
        )
        ans = r.content[0].text
        st.write("Claude: " + ans)
        create_file(text, ans, "md")
        st.session_state.chat_history.append({"user":text,"claude":ans})
    return ans

def create_zip_of_files(md_files, mp3_files):
    # Exclude README.md
    md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
    all_files = md_files + mp3_files
    if not all_files:
        return None
    # Build a descriptive name
    stems = [os.path.splitext(os.path.basename(f))[0] for f in all_files]
    joined = "_".join(stems)
    if len(joined) > 50:
        joined = joined[:50] + "_etc"
    zip_name = f"{joined}.zip"
    with zipfile.ZipFile(zip_name,'w') as z:
        for f in all_files:
            z.write(f)
    return zip_name

def load_files_for_sidebar():
    # Gather files
    md_files = glob.glob("*.md")
    mp3_files = glob.glob("*.mp3")

    # Exclude README.md
    md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']

    all_files = md_files + mp3_files

    # Group by first 10 chars of filename
    groups = defaultdict(list)
    for f in all_files:
        fname = os.path.basename(f)
        prefix = fname[:10] # first 10 chars as group prefix
        groups[prefix].append(f)

    # Sort files in each group by mod time descending
    for prefix in groups:
        groups[prefix].sort(key=lambda x: os.path.getmtime(x), reverse=True)

    # Sort prefixes by newest file time
    sorted_prefixes = sorted(groups.keys(), key=lambda pre: max(os.path.getmtime(x) for x in groups[pre]), reverse=True)

    return groups, sorted_prefixes
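# Illustrative grouping (hypothetical filenames):
#   "2311_0915_Quantum_computing_advances.md" and "2311_0915_Here_are_the.mp3"
#   both start with "2311_0915_", so they land in the same sidebar group, while
#   "2311_0917_Another_query_result.md" starts a new group.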

def extract_keywords_from_md(files):
    # Combine all MD content
    text = ""
    for f in files:
        if f.endswith(".md"):
            c = open(f,'r',encoding='utf-8').read()
            text += " " + c
    # Extract first 5 unique words
    words = re.findall(r"\w+", text.lower())
    unique_words = []
    for w in words:
        if w not in unique_words:
            unique_words.append(w)
            if len(unique_words) == 5:
                break
    return unique_words

def display_file_manager_sidebar(groups, sorted_prefixes):
    st.sidebar.title("🎵 Audio & Document Manager")

    # Collect all md and mp3 files for zip operations
    all_md = []
    all_mp3 = []
    for prefix in groups:
        for f in groups[prefix]:
            if f.endswith(".md"):
                all_md.append(f)
            elif f.endswith(".mp3"):
                all_mp3.append(f)

    top_bar = st.sidebar.columns(3)
    with top_bar[0]:
        if st.button("🗑 Del All MD"):
            for f in all_md:
                os.remove(f)
            st.session_state.should_rerun = True
    with top_bar[1]:
        if st.button("🗑 Del All MP3"):
            for f in all_mp3:
                os.remove(f)
            st.session_state.should_rerun = True
    with top_bar[2]:
        if st.button("⬇️ Zip All"):
            z = create_zip_of_files(all_md, all_mp3)
            if z:
                st.sidebar.markdown(get_download_link(z),unsafe_allow_html=True)

    for prefix in sorted_prefixes:
        files = groups[prefix]
        # Extract 5-word keywords from MD in this group
        kw = extract_keywords_from_md(files)
        keywords_str = " ".join(kw) if kw else "No Keywords"
        with st.sidebar.expander(f"{prefix} Files ({len(files)}) - Keywords: {keywords_str}", expanded=True):
            # Delete group / View group
            c1,c2 = st.columns(2)
            with c1:
                if st.button("👀View Group", key="view_group_"+prefix):
                    st.session_state.viewing_prefix = prefix
                    # No rerun needed, just a state update
            with c2:
                if st.button("🗑Del Group", key="del_group_"+prefix):
                    for f in files:
                        os.remove(f)
                    st.session_state.should_rerun = True

            for f in files:
                fname = os.path.basename(f)
                ctime = datetime.fromtimestamp(os.path.getmtime(f)).strftime("%Y-%m-%d %H:%M:%S")
                ext = os.path.splitext(fname)[1].lower().strip('.')
                st.write(f"**{fname}** - {ctime}")
            # Individual file view/edit actions are omitted here; the group-level
            # "View Group" and "Del Group" buttons cover the main workflow.

def main():
    st.sidebar.markdown("### 🚲BikeAI🏆 Multi-Agent Research AI")
    tab_main = st.radio("Action:",["🎤 Voice Input","📸 Media Gallery","🔍 Search ArXiv","📝 File Editor"],horizontal=True)

    model_choice = st.sidebar.radio("AI Model:", ["Arxiv","GPT-4o","Claude-3","GPT+Claude+Arxiv"], index=0)

    mycomponent = components.declare_component("mycomponent", path="mycomponent")
    val = mycomponent(my_input_value="Hello")
    if val:
        user_input = val.strip()
        if user_input:
            if model_choice == "GPT-4o":
                process_with_gpt(user_input)
            elif model_choice == "Claude-3":
                process_with_claude(user_input)
            elif model_choice == "Arxiv":
                st.subheader("Arxiv Only Results:")
                perform_ai_lookup(user_input, vocal_summary=True, extended_refs=False, titles_summary=True)
            else:
                col1,col2,col3=st.columns(3)
                with col1:
                    st.subheader("GPT-4o Omni:")
                    try:
                        process_with_gpt(user_input)
                    except:
                        st.write('GPT 4o error')
                with col2:
                    st.subheader("Claude-3 Sonnet:")
                    try:
                        process_with_claude(user_input)
                    except:
                        st.write('Claude error')
                with col3:
                    st.subheader("Arxiv + Mistral:")
                    try:
                        perform_ai_lookup(user_input, vocal_summary=True, extended_refs=False, titles_summary=True)
                    except:
                        st.write("Arxiv error")

    if tab_main == "🔍 Search ArXiv":
        st.subheader("🔍 Search ArXiv")
        q=st.text_input("Research query:")

        st.markdown("### 🎛️ Audio Generation Options")
        vocal_summary = st.checkbox("🎙️ Vocal Summary (Short Answer)", value=True)
        extended_refs = st.checkbox("📜 Extended References & Summaries (Long)", value=False)
        titles_summary = st.checkbox("🔖 Paper Titles Only", value=True)

        if q and st.button("Run ArXiv Query"):
            perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs, titles_summary=titles_summary)

    elif tab_main == "🎤 Voice Input":
        st.subheader("🎤 Voice Recognition")
        user_text = st.text_area("Message:", height=100)
        user_text = user_text.strip()
        if st.button("Send 📨"):
            if user_text:
                if model_choice == "GPT-4o":
                    process_with_gpt(user_text)
                elif model_choice == "Claude-3":
                    process_with_claude(user_text)
                elif model_choice == "Arxiv":
                    st.subheader("Arxiv Only Results:")
                    perform_ai_lookup(user_text, vocal_summary=True, extended_refs=False, titles_summary=True)
                else:
                    col1,col2,col3=st.columns(3)
                    with col1:
                        st.subheader("GPT-4o Omni:")
                        process_with_gpt(user_text)
                    with col2:
                        st.subheader("Claude-3 Sonnet:")
                        process_with_claude(user_text)
                    with col3:
                        st.subheader("Arxiv & Mistral:")
                        res = perform_ai_lookup(user_text, vocal_summary=True, extended_refs=False, titles_summary=True)
                        st.markdown(res)
        st.subheader("📜 Chat History")
        t1,t2=st.tabs(["Claude History","GPT-4o History"])
        with t1:
            for c in st.session_state.chat_history:
                st.write("**You:**", c["user"])
                st.write("**Claude:**", c["claude"])
        with t2:
            for m in st.session_state.messages:
                with st.chat_message(m["role"]):
                    st.markdown(m["content"])

    elif tab_main == "📸 Media Gallery":
        st.header("🎬 Media Gallery - Images and Videos")
        tabs = st.tabs(["🖼️ Images", "🎥 Video"])
        with tabs[0]:
            imgs = glob.glob("*.png")+glob.glob("*.jpg")
            if imgs:
                c = st.slider("Cols",1,5,3)
                cols = st.columns(c)
                for i,f in enumerate(imgs):
                    with cols[i%c]:
                        st.image(Image.open(f),use_container_width=True)
                        if st.button(f"👀 Analyze {os.path.basename(f)}", key=f"analyze_{f}"):
                            a = process_image(f,"Describe this image.")
                            st.markdown(a)
            else:
                st.write("No images found.")
        with tabs[1]:
            vids = glob.glob("*.mp4")
            if vids:
                for v in vids:
                    with st.expander(f"🎥 {os.path.basename(v)}"):
                        st.video(v)
                        if st.button(f"Analyze {os.path.basename(v)}", key=f"analyze_{v}"):
                            a = process_video_with_gpt(v,"Describe video.")
                            st.markdown(a)
            else:
                st.write("No videos found.")

    elif tab_main == "📝 File Editor":
        if getattr(st.session_state,'current_file',None):
            st.subheader(f"Editing: {st.session_state.current_file}")
            new_text = st.text_area("Content:", st.session_state.file_content, height=300)
            if st.button("Save"):
                with open(st.session_state.current_file,'w',encoding='utf-8') as f:
                    f.write(new_text)
                st.success("Updated!")
                st.session_state.should_rerun = True
        else:
            st.write("Select a file from the sidebar to edit.")

    # After main content, load and show file groups in sidebar
    groups, sorted_prefixes = load_files_for_sidebar()
    display_file_manager_sidebar(groups, sorted_prefixes)

    # If viewing a prefix group, show all files in main area
    if st.session_state.viewing_prefix and st.session_state.viewing_prefix in groups:
        st.write("---")
        st.write(f"**Viewing Group:** {st.session_state.viewing_prefix}")
        # Show all files in this prefix group in order (mp3 and md)
        # Sort by mod time descending (already sorted)
        for f in groups[st.session_state.viewing_prefix]:
            fname = os.path.basename(f)
            ext = os.path.splitext(fname)[1].lower().strip('.')
            st.write(f"### {fname}")
            if ext == "md":
                content = open(f,'r',encoding='utf-8').read()
                st.markdown(content)
            elif ext == "mp3":
                st.audio(f)
            else:
                # just show a download link
                st.markdown(get_download_link(f), unsafe_allow_html=True)
        if st.button("Close Group View"):
            st.session_state.viewing_prefix = None

    if st.session_state.should_rerun:
        st.session_state.should_rerun = False
        st.rerun()

if __name__=="__main__":
    main()