awacke1 committed on
Commit 43252e1 • 1 Parent(s): be7fbed

Create app.py

Files changed (1)
app.py +475 -0
app.py ADDED
@@ -0,0 +1,475 @@
+ import streamlit as st
+ import anthropic, openai, base64, cv2, glob, json, math, os, pytz, random, re, requests, textract, time, zipfile
+ import plotly.graph_objects as go
+ import streamlit.components.v1 as components
+ from datetime import datetime
+ from audio_recorder_streamlit import audio_recorder
+ from bs4 import BeautifulSoup
+ from collections import deque
+ from dotenv import load_dotenv
+ from gradio_client import Client
+ from huggingface_hub import InferenceClient
+ from io import BytesIO
+ from PIL import Image
+ from PyPDF2 import PdfReader
+ from urllib.parse import quote
+ from xml.etree import ElementTree as ET
+ from openai import OpenAI
+ import extra_streamlit_components as stx
+ from streamlit.runtime.scriptrunner import get_script_run_ctx
+ import asyncio
+ import edge_tts  # ensure this is installed (pip install edge-tts)
+
+ # 🔧 Config & Setup
+ st.set_page_config(
+     page_title="🚲BikeAI🏆 Claude/GPT Research",
+     page_icon="🚲🏆",
+     layout="wide",
+     initial_sidebar_state="auto",
+     menu_items={
+         'Get Help': 'https://huggingface.co/awacke1',
+         'Report a bug': 'https://huggingface.co/spaces/awacke1',
+         'About': "🚲BikeAI🏆 Claude/GPT Research AI"
+     }
+ )
+ load_dotenv()
+ openai.api_key = os.getenv('OPENAI_API_KEY') or st.secrets['OPENAI_API_KEY']
+ anthropic_key = os.getenv("ANTHROPIC_API_KEY_3") or st.secrets["ANTHROPIC_API_KEY"]
+ claude_client = anthropic.Anthropic(api_key=anthropic_key)
+ openai_client = OpenAI(api_key=openai.api_key, organization=os.getenv('OPENAI_ORG_ID'))
+ HF_KEY = os.getenv('HF_KEY')
+ API_URL = os.getenv('API_URL')
+
+ st.session_state.setdefault('transcript_history', [])
+ st.session_state.setdefault('chat_history', [])
+ st.session_state.setdefault('openai_model', "gpt-4o-2024-05-13")
+ st.session_state.setdefault('messages', [])
+ st.session_state.setdefault('last_voice_input', "")
+
+ # 🎨 Minimal Custom CSS
+ st.markdown("""
+ <style>
+ .main { background: linear-gradient(to right, #1a1a1a, #2d2d2d); color: #fff; }
+ .stMarkdown { font-family: 'Helvetica Neue', sans-serif; }
+ </style>
+ """, unsafe_allow_html=True)
+
+ # 🔑 Common Utilities
+ def generate_filename(prompt, file_type="md"):
+     # Timestamped, filesystem-safe filename derived from the prompt text.
+     ctz = pytz.timezone('US/Central')
+     date_str = datetime.now(ctz).strftime("%m%d_%H%M")
+     safe = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
+     safe = re.sub(r'\s+', ' ', safe).strip()[:90]
+     return f"{date_str}_{safe}.{file_type}"
+
+ def create_file(filename, prompt, response):
+     with open(filename, 'w', encoding='utf-8') as f:
+         f.write(prompt + "\n\n" + response)
+
+ def get_download_link(file):
+     with open(file, "rb") as f:
+         b64 = base64.b64encode(f.read()).decode()
+     return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file)}">📂 Download {os.path.basename(file)}</a>'
+
+ def speech_synthesis_html(result):
+     # Browser-side speech synthesis. Not cached: the component must re-render
+     # on every call for the speech to replay. Quotes and newlines are stripped
+     # so the text embeds safely in the inline JavaScript string.
+     text = result.replace('"', '').replace('\n', ' ')
+     html_code = f"""
+     <html><body>
+     <script>
+     var msg = new SpeechSynthesisUtterance("{text}");
+     window.speechSynthesis.speak(msg);
+     </script>
+     </body></html>
+     """
+     components.html(html_code, height=0)
+
+ # --- Edge TTS helpers ---
+ async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=0):
+     if not text.strip():
+         return None
+     rate_str = f"{rate:+d}%"
+     pitch_str = f"{pitch:+d}Hz"
+     communicate = edge_tts.Communicate(text, voice, rate=rate_str, pitch=pitch_str)
+     out_fn = generate_filename(text, "mp3")
+     await communicate.save(out_fn)
+     return out_fn
+
+ def speak_with_edge_tts(text, voice="en-US-AriaNeural", rate=0, pitch=0):
+     return asyncio.run(edge_tts_generate_audio(text, voice, rate, pitch))
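+ # Note: asyncio.run() spins up a fresh event loop per synthesis call, bridging
+ # edge_tts's async API into Streamlit's synchronous script run. This assumes no
+ # event loop is already running in the script thread; if one is, asyncio.run()
+ # raises RuntimeError.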
+
+ def play_and_download_audio(file_path):
+     if file_path and os.path.exists(file_path):
+         st.audio(file_path)
+         st.markdown(get_download_link(file_path), unsafe_allow_html=True)
+ #---------------------------
+
+ def process_image(image_path, user_prompt):
+     with open(image_path, "rb") as imgf:
+         image_data = imgf.read()
+     b64img = base64.b64encode(image_data).decode("utf-8")
+     resp = openai_client.chat.completions.create(
+         model=st.session_state["openai_model"],
+         messages=[
+             {"role": "system", "content": "You are a helpful assistant."},
+             {"role": "user", "content": [
+                 {"type": "text", "text": user_prompt},
+                 {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64img}"}}
+             ]}
+         ],
+         temperature=0.0,
+     )
+     return resp.choices[0].message.content
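+ # Note: the image is passed inline as a base64 data URL; the vision-capable
+ # chat models accept either a hosted URL or inline data. Inline data keeps the
+ # call self-contained at the cost of a larger request payload.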
+
+ def process_audio(audio_path):
+     with open(audio_path, "rb") as f:
+         transcription = openai_client.audio.transcriptions.create(model="whisper-1", file=f)
+     st.session_state.messages.append({"role": "user", "content": transcription.text})
+     return transcription.text
+
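+ # Frame sampling: rather than sending every frame, process_video grabs one
+ # frame per `seconds_per_frame` seconds (skip = fps * seconds_per_frame) and
+ # base64-encodes each JPEG so the frames can be passed to the vision model.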
+ def process_video(video_path, seconds_per_frame=1):
+     vid = cv2.VideoCapture(video_path)
+     total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
+     fps = vid.get(cv2.CAP_PROP_FPS)
+     skip = max(1, int(fps * seconds_per_frame))  # guard: fps can be 0 for unreadable files, and range() needs a positive step
+     frames_b64 = []
+     for i in range(0, total, skip):
+         vid.set(cv2.CAP_PROP_POS_FRAMES, i)
+         ret, frame = vid.read()
+         if not ret:
+             break
+         _, buf = cv2.imencode(".jpg", frame)
+         frames_b64.append(base64.b64encode(buf).decode("utf-8"))
+     vid.release()
+     return frames_b64
+
+ def process_video_with_gpt(video_path, prompt):
+     frames = process_video(video_path)
+     resp = openai_client.chat.completions.create(
+         model=st.session_state["openai_model"],
+         messages=[
+             {"role": "system", "content": "Analyze video frames."},
+             {"role": "user", "content": [
+                 {"type": "text", "text": prompt},
+                 *[{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{fr}"}} for fr in frames]
+             ]}
+         ]
+     )
+     return resp.choices[0].message.content
+
+ def search_arxiv(query):
+     st.write("🔍 Searching ArXiv...")
+     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+     r1 = client.predict(prompt=query, llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1", stream_outputs=True, api_name="/ask_llm")
+     st.markdown("### Mixtral-8x7B-Instruct-v0.1 Result")
+     st.markdown(r1)
+     r2 = client.predict(prompt=query, llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2", stream_outputs=True, api_name="/ask_llm")
+     st.markdown("### Mistral-7B-Instruct-v0.2 Result")
+     st.markdown(r2)
+     return f"{r1}\n\n{r2}"
+
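+ # perform_ai_lookup runs two calls against the same Gradio space: the
+ # /update_with_rag_md endpoint returns the RAG-retrieved references (r[0]),
+ # and a second /ask_llm call produces the short answer that is read aloud below.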
+ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True):
+     start = time.time()
+     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+     r = client.predict(q, 20, "Semantic Search", "mistralai/Mixtral-8x7B-Instruct-v0.1", api_name="/update_with_rag_md")
+     refs = r[0]
+     r2 = client.predict(q, "mistralai/Mixtral-8x7B-Instruct-v0.1", True, api_name="/ask_llm")
+     result = f"### 🔎 {q}\n\n{r2}\n\n{refs}"
+
+     st.markdown(result)
+
+     # Main Vocal Summary (Short Answer)
+     if vocal_summary:
+         start_main_part = time.time()
+         audio_file_main = speak_with_edge_tts(r2, voice="en-US-AriaNeural", rate=0, pitch=0)
+         st.write("### 🎙️ Vocal Summary (Short Answer)")
+         play_and_download_audio(audio_file_main)
+         st.write(f"**Elapsed (Short Answer):** {time.time() - start_main_part:.2f} s")
+
+     # Extended References & Summaries (optional)
+     if extended_refs:
+         start_refs_part = time.time()
+         summaries_text = "Here are the summaries from the references: " + refs.replace('"', '')
+         audio_file_refs = speak_with_edge_tts(summaries_text, voice="en-US-AriaNeural", rate=0, pitch=0)
+         st.write("### 📜 Extended References & Summaries")
+         play_and_download_audio(audio_file_refs)
+         st.write(f"**Elapsed (Extended References):** {time.time() - start_refs_part:.2f} s")
+
+     # Paper Titles Only (short)
+     if titles_summary:
+         start_titles_part = time.time()
+         titles = []
+         for line in refs.split('\n'):
+             m = re.search(r"\[([^\]]+)\]", line)
+             if m:
+                 titles.append(m.group(1))
+         if titles:
+             titles_text = "Here are the titles of the papers: " + ", ".join(titles)
+             audio_file_titles = speak_with_edge_tts(titles_text, voice="en-US-AriaNeural", rate=0, pitch=0)
+             st.write("### 🔖 Paper Titles")
+             play_and_download_audio(audio_file_titles)
+             st.write(f"**Elapsed (Titles):** {time.time() - start_titles_part:.2f} s")
+
+     elapsed = time.time() - start
+     st.write(f"**Total Elapsed:** {elapsed:.2f} s")
+     fn = generate_filename(q, "md")
+     create_file(fn, q, result)
+     return result
+
+ def process_with_gpt(text):
+     if not text:
+         return
+     st.session_state.messages.append({"role": "user", "content": text})
+     with st.chat_message("user"):
+         st.markdown(text)
+     with st.chat_message("assistant"):
+         c = openai_client.chat.completions.create(
+             model=st.session_state["openai_model"],
+             messages=st.session_state.messages,
+             stream=False
+         )
+         ans = c.choices[0].message.content
+         st.write("GPT-4o: " + ans)
+         create_file(generate_filename(text, "md"), text, ans)
+         st.session_state.messages.append({"role": "assistant", "content": ans})
+     return ans
+
+ def process_with_claude(text):
+     if not text:
+         return
+     with st.chat_message("user"):
+         st.markdown(text)
+     with st.chat_message("assistant"):
+         r = claude_client.messages.create(
+             model="claude-3-sonnet-20240229",
+             max_tokens=1000,
+             messages=[{"role": "user", "content": text}]
+         )
+         ans = r.content[0].text
+         st.write("Claude: " + ans)
+         create_file(generate_filename(text, "md"), text, ans)
+         st.session_state.chat_history.append({"user": text, "claude": ans})
+     return ans
+
+ def create_zip_of_files():
+     md_files = glob.glob("*.md")
+     mp3_files = glob.glob("*.mp3")
+     all_files = md_files + mp3_files
+     zip_name = "all_files.zip"
+     with zipfile.ZipFile(zip_name, 'w') as z:
+         for f in all_files:
+             z.write(f)
+     return zip_name
+
+ def get_media_html(p, typ="video", w="100%"):
+     with open(p, 'rb') as f:
+         d = base64.b64encode(f.read()).decode()
+     if typ == "video":
+         return f'<video width="{w}" controls autoplay muted loop><source src="data:video/mp4;base64,{d}" type="video/mp4"></video>'
+     else:
+         return f'<audio controls style="width:{w};"><source src="data:audio/mpeg;base64,{d}" type="audio/mpeg"></audio>'
+
+ # CHANGES START HERE:
+ # Define file emojis and the directory to scan
+ FILE_EMOJIS = {
+     "cards": "🗃️",
+     "csv": "📊",
+     "heic": "🖼️",
+     "ico": "🪧",
+     "jpeg": "🖼️",
+     "json": "🔧",
+     "md": "📝",
+     "mid": "🎼",
+     "mov": "🎥",
+     "mp3": "🎵",
+     "mp4": "🎞️",
+     "png": "🖼️",
+     "svg": "🖌️",
+     "txt": "📄",
+     "wav": "🎶",
+     "webm": "📽️",
+     "webp": "🏞️",
+     "zip": "📦",
+ }
+ MEDIA_DIR = "Media"
+
+ def get_file_ext(filename):
+     return os.path.splitext(filename)[1].lower().strip('.')
+
+ def load_files():
+     all_files = []
+     for root, dirs, files in os.walk(MEDIA_DIR):
+         for f in files:
+             fp = os.path.join(root, f)
+             if os.path.isfile(fp):
+                 ext = get_file_ext(fp)
+                 mod_time = os.path.getmtime(fp)
+                 all_files.append((fp, ext, mod_time))
+     return all_files
+
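+ # Sidebar layout: files are grouped by extension with a defaultdict, newest
+ # first within each group, and the extension groups themselves are ordered by
+ # file count so the busiest types appear at the top of the sidebar.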
+ def display_files_sidebar():
+     st.sidebar.title("📂 Media Files")
+
+     all_files = load_files()
+     from collections import defaultdict
+     ext_map = defaultdict(list)
+     for fp, ext, mod_time in all_files:
+         ext_map[ext].append((fp, mod_time))
+
+     # Sort files in each extension group by modification time descending
+     for ext in ext_map:
+         ext_map[ext].sort(key=lambda x: x[1], reverse=True)
+
+     # Sort extensions by number of files descending
+     sorted_ext = sorted(ext_map.keys(), key=lambda x: len(ext_map[x]), reverse=True)
+
+     for ext in sorted_ext:
+         emoji = FILE_EMOJIS.get(ext, "📁")
+         count = len(ext_map[ext])
+         with st.sidebar.expander(f"{emoji} {ext.upper()} ({count})"):
+             for fp, mod_time in ext_map[ext]:
+                 basename = os.path.basename(fp)
+                 last_mod = datetime.fromtimestamp(mod_time).strftime("%Y-%m-%d %H:%M:%S")
+                 col1, col2 = st.columns([3, 1])
+                 with col1:
+                     st.write(f"**{basename}** - Modified: {last_mod}")
+                 with col2:
+                     if ext == "mp3":
+                         # For MP3, build the (base64-heavy) download link only on demand.
+                         # A checkbox stands in for the nested expander here, since
+                         # Streamlit does not allow expanders inside other expanders.
+                         if st.checkbox("Load MP3 Download Link", key=f"mp3_{fp}"):
+                             st.markdown(get_download_link(fp), unsafe_allow_html=True)
+                         # If desired, add an on-demand audio player here.
+                     else:
+                         # Direct download link for other files
+                         st.markdown(get_download_link(fp), unsafe_allow_html=True)
+ # CHANGES END HERE
+
+ def main():
+     st.sidebar.markdown("### 🚲BikeAI🏆 Multi-Agent Research AI")
+     tab_main = st.radio("Action:", ["🎤 Voice Input", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"], horizontal=True)
+
+     model_choice = st.sidebar.radio("AI Model:", ["Arxiv", "GPT-4o", "Claude-3", "GPT+Claude+Arxiv"], index=0)
+
+     # Declare the component
+     mycomponent = components.declare_component("mycomponent", path="mycomponent")
+     val = mycomponent(my_input_value="Hello")
+     if val:
+         user_input = val.strip()
+         if user_input:
+             if model_choice == "GPT-4o":
+                 process_with_gpt(user_input)
+             elif model_choice == "Claude-3":
+                 process_with_claude(user_input)
+             elif model_choice == "Arxiv":
+                 st.subheader("Arxiv Only Results:")
+                 perform_ai_lookup(user_input, vocal_summary=True, extended_refs=False, titles_summary=True)
+             else:
+                 col1, col2, col3 = st.columns(3)
+                 with col1:
+                     st.subheader("GPT-4o Omni:")
+                     try:
+                         process_with_gpt(user_input)
+                     except Exception:
+                         st.write('GPT-4o error')
+                 with col2:
+                     st.subheader("Claude-3 Sonnet:")
+                     try:
+                         process_with_claude(user_input)
+                     except Exception:
+                         st.write('Claude error')
+                 with col3:
+                     st.subheader("Arxiv + Mistral:")
+                     try:
+                         perform_ai_lookup(user_input, vocal_summary=True, extended_refs=False, titles_summary=True)
+                     except Exception:
+                         st.write("Arxiv error")
+
+     if tab_main == "🔍 Search ArXiv":
+         st.subheader("🔍 Search ArXiv")
+         q = st.text_input("Research query:")
+
+         # 🎛️ Audio Generation Options
+         st.markdown("### 🎛️ Audio Generation Options")
+         vocal_summary = st.checkbox("🎙️ Vocal Summary (Short Answer)", value=True)
+         extended_refs = st.checkbox("📜 Extended References & Summaries (Long)", value=False)
+         titles_summary = st.checkbox("🔖 Paper Titles Only", value=True)
+
+         if q:
+             q = q.strip()
+             if q and st.button("Run ArXiv Query"):
+                 r = perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs, titles_summary=titles_summary)
+                 st.markdown(r)
+
+     elif tab_main == "🎤 Voice Input":
+         st.subheader("🎤 Voice Recognition")
+         user_text = st.text_area("Message:", height=100)
+         user_text = user_text.strip()
+         if st.button("Send 📨"):
+             if user_text:
+                 if model_choice == "GPT-4o":
+                     process_with_gpt(user_text)
+                 elif model_choice == "Claude-3":
+                     process_with_claude(user_text)
+                 elif model_choice == "Arxiv":
+                     st.subheader("Arxiv Only Results:")
+                     perform_ai_lookup(user_text, vocal_summary=True, extended_refs=False, titles_summary=True)
+                 else:
+                     col1, col2, col3 = st.columns(3)
+                     with col1:
+                         st.subheader("GPT-4o Omni:")
+                         process_with_gpt(user_text)
+                     with col2:
+                         st.subheader("Claude-3 Sonnet:")
+                         process_with_claude(user_text)
+                     with col3:
+                         st.subheader("Arxiv & Mistral:")
+                         res = perform_ai_lookup(user_text, vocal_summary=True, extended_refs=False, titles_summary=True)
+                         st.markdown(res)
+         st.subheader("📜 Chat History")
+         t1, t2 = st.tabs(["Claude History", "GPT-4o History"])
+         with t1:
+             for c in st.session_state.chat_history:
+                 st.write("**You:**", c["user"])
+                 st.write("**Claude:**", c["claude"])
+         with t2:
+             for m in st.session_state.messages:
+                 with st.chat_message(m["role"]):
+                     st.markdown(m["content"])
+
+     elif tab_main == "📸 Media Gallery":
+         st.header("🎬 Media Gallery - Images and Videos")
+         tabs = st.tabs(["🖼️ Images", "🎥 Video"])
+         with tabs[0]:
+             imgs = glob.glob("*.png") + glob.glob("*.jpg")
+             if imgs:
+                 c = st.slider("Cols", 1, 5, 3)
+                 cols = st.columns(c)
+                 for i, f in enumerate(imgs):
+                     with cols[i % c]:
+                         st.image(Image.open(f), use_container_width=True)
+                         if st.button(f"👀 Analyze {os.path.basename(f)}"):
+                             a = process_image(f, "Describe this image.")
+                             st.markdown(a)
+             else:
+                 st.write("No images found.")
+         with tabs[1]:
+             vids = glob.glob("*.mp4")
+             if vids:
+                 for v in vids:
+                     with st.expander(f"🎥 {os.path.basename(v)}"):
+                         st.markdown(get_media_html(v, "video"), unsafe_allow_html=True)
+                         if st.button(f"Analyze {os.path.basename(v)}"):
+                             a = process_video_with_gpt(v, "Describe video.")
+                             st.markdown(a)
+             else:
+                 st.write("No videos found.")
+
+     elif tab_main == "📝 File Editor":
+         if getattr(st.session_state, 'current_file', None):
+             st.subheader(f"Editing: {st.session_state.current_file}")
+             new_text = st.text_area("Content:", st.session_state.get('file_content', ''), height=300)
+             if st.button("Save"):
+                 with open(st.session_state.current_file, 'w', encoding='utf-8') as f:
+                     f.write(new_text)
+                 st.success("Updated!")
+         else:
+             st.write("Select a file from the sidebar to edit.")
+
+     # Use the new sidebar display function
+     display_files_sidebar()
+
+ if __name__ == "__main__":
+     main()