awacke1 committed on
Commit
ddf1347
•
1 Parent(s): 23488d1

Update app.py

Files changed (1)
  1. app.py +665 -4
app.py CHANGED
@@ -31,7 +31,7 @@ from urllib.parse import quote
31
  from xml.etree import ElementTree as ET
32
  from openai import OpenAI
33
 
34
- # Configuration and Setup
35
  Site_Name = '🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI'
36
  title = "🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI"
37
  helpURL = 'https://huggingface.co/awacke1'
@@ -50,7 +50,7 @@ st.set_page_config(
50
  }
51
  )
52
 
53
- # Load environment variables and initialize clients
54
  load_dotenv()
55
 
56
  # OpenAI setup
@@ -63,13 +63,13 @@ openai_client = OpenAI(
63
  organization=os.getenv('OPENAI_ORG_ID')
64
  )
65
 
66
- # Claude setup
67
  anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
68
  if anthropic_key is None:
69
  anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
70
  claude_client = anthropic.Anthropic(api_key=anthropic_key)
71
 
72
- # Initialize session states
73
  if 'transcript_history' not in st.session_state:
74
  st.session_state.transcript_history = []
75
  if "chat_history" not in st.session_state:
@@ -81,6 +81,667 @@ if "messages" not in st.session_state:
81
  if 'last_voice_input' not in st.session_state:
82
  st.session_state.last_voice_input = ""
83
 
84
  # Speech Recognition HTML Component
85
  speech_recognition_html = """
86
  <!DOCTYPE html>
 
31
  from xml.etree import ElementTree as ET
32
  from openai import OpenAI
33
 
34
+ # 1. Configuration and Setup
35
  Site_Name = '🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI'
36
  title = "🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI"
37
  helpURL = 'https://huggingface.co/awacke1'
 
50
  }
51
  )
52
 
53
+ # 2. Load environment variables and initialize clients
54
  load_dotenv()
55
 
56
  # OpenAI setup
 
63
  organization=os.getenv('OPENAI_ORG_ID')
64
  )
65
 
66
+ # 3. Claude setup
67
  anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
68
  if anthropic_key == None:
69
  anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
70
  claude_client = anthropic.Anthropic(api_key=anthropic_key)
71
 
72
+ # 4. Initialize session states
73
  if 'transcript_history' not in st.session_state:
74
  st.session_state.transcript_history = []
75
  if "chat_history" not in st.session_state:
 
81
  if 'last_voice_input' not in st.session_state:
82
  st.session_state.last_voice_input = ""
83
 
84
+ # 5. HuggingFace setup
85
+ API_URL = os.getenv('API_URL')
86
+ HF_KEY = os.getenv('HF_KEY')
87
+ MODEL1 = "meta-llama/Llama-2-7b-chat-hf"
88
+ MODEL2 = "openai/whisper-small.en"
89
+
90
+ headers = {
91
+ "Authorization": f"Bearer {HF_KEY}",
92
+ "Content-Type": "application/json"
93
+ }
94
+
95
+ # Initialize session states
96
+ if "chat_history" not in st.session_state:
97
+ st.session_state.chat_history = []
98
+ if "openai_model" not in st.session_state:
99
+ st.session_state["openai_model"] = "gpt-4o-2024-05-13"
100
+ if "messages" not in st.session_state:
101
+ st.session_state.messages = []
102
+
103
+ # Custom CSS
104
+ st.markdown("""
105
+ <style>
106
+ .main {
107
+ background: linear-gradient(to right, #1a1a1a, #2d2d2d);
108
+ color: #ffffff;
109
+ }
110
+ .stMarkdown {
111
+ font-family: 'Helvetica Neue', sans-serif;
112
+ }
113
+ .category-header {
114
+ background: linear-gradient(45deg, #2b5876, #4e4376);
115
+ padding: 20px;
116
+ border-radius: 10px;
117
+ margin: 10px 0;
118
+ }
119
+ .scene-card {
120
+ background: rgba(0,0,0,0.3);
121
+ padding: 15px;
122
+ border-radius: 8px;
123
+ margin: 10px 0;
124
+ border: 1px solid rgba(255,255,255,0.1);
125
+ }
126
+ .media-gallery {
127
+ display: grid;
128
+ gap: 1rem;
129
+ padding: 1rem;
130
+ }
131
+ .bike-card {
132
+ background: rgba(255,255,255,0.05);
133
+ border-radius: 10px;
134
+ padding: 15px;
135
+ transition: transform 0.3s;
136
+ }
137
+ .bike-card:hover {
138
+ transform: scale(1.02);
139
+ }
140
+ </style>
141
+ """, unsafe_allow_html=True)
142
+
143
+
144
+ # Bike Collections
145
+ bike_collections = {
146
+ "Celestial Collection 🌌": {
147
+ "Eclipse Vaulter": {
148
+ "prompt": """Cinematic shot of a sleek black mountain bike silhouetted against a total solar eclipse.
149
+ The corona creates an ethereal halo effect, with lens flares accentuating key points of the frame.
150
+ Dynamic composition shows the bike mid-leap, with stardust particles trailing behind.
151
+ Camera angle: Low angle, wide shot
152
+ Lighting: Dramatic rim lighting from eclipse
153
+ Color palette: Deep purples, cosmic blues, corona gold""",
154
+ "emoji": "πŸŒ‘"
155
+ },
156
+ "Starlight Leaper": {
157
+ "prompt": """A black bike performing an epic leap under a vast Milky Way galaxy.
158
+ Shimmering stars blanket the sky while the bike's wheels leave a trail of stardust.
159
+ Camera angle: Wide-angle upward shot
160
+ Lighting: Natural starlight with subtle rim lighting
161
+ Color palette: Deep blues, silver highlights, cosmic purples""",
162
+ "emoji": "✨"
163
+ },
164
+ "Moonlit Hopper": {
165
+ "prompt": """A sleek black bike mid-hop over a moonlit meadow,
166
+ the full moon illuminating the misty surroundings. Fireflies dance around the bike,
167
+ and soft shadows create a serene yet dynamic atmosphere.
168
+ Camera angle: Side profile with slight low angle
169
+ Lighting: Soft moonlight with atmospheric fog
170
+ Color palette: Silver blues, soft whites, deep shadows""",
171
+ "emoji": "πŸŒ™"
172
+ }
173
+ },
174
+ "Nature-Inspired Collection 🌲": {
175
+ "Shadow Grasshopper": {
176
+ "prompt": """A black bike jumping between forest paths,
177
+ with dappled sunlight streaming through the canopy. Shadows dance on the bike's frame
178
+ as it soars above mossy logs.
179
+ Camera angle: Through-the-trees tracking shot
180
+ Lighting: Natural forest lighting with sun rays
181
+ Color palette: Forest greens, golden sunlight, deep shadows""",
182
+ "emoji": "πŸ¦—"
183
+ },
184
+ "Onyx Leapfrog": {
185
+ "prompt": """A bike with obsidian-black finish jumping over a sparkling creek,
186
+ the reflection on the water broken into ripples by the leap. The surrounding forest
187
+ is vibrant with greens and browns.
188
+ Camera angle: Low angle from water level
189
+ Lighting: Golden hour side lighting
190
+ Color palette: Deep blacks, water blues, forest greens""",
191
+ "emoji": "🐸"
192
+ }
193
+ }
194
+ }
195
+
196
+
197
+ # Helper Functions
198
+ def generate_filename(prompt, file_type):
199
+ """Generate a safe filename using the prompt and file type."""
200
+ central = pytz.timezone('US/Central')
201
+ safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
202
+ replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
203
+ safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
204
+ return f"{safe_date_time}_{safe_prompt}.{file_type}"
205
+
206
+
207
+
208
+
209
+ # Function to create and save a file (and avoid the black hole of lost data 🕳)
210
+ def create_file(filename, prompt, response, should_save=True):
211
+ if not should_save:
212
+ return
213
+ with open(filename, 'w', encoding='utf-8') as file:
214
+ file.write(prompt + "\n\n" + response)
215
+
216
+
217
+
218
+ def create_and_save_file(content, file_type="md", prompt=None, is_image=False, should_save=True):
219
+ """Create and save file with proper handling of different types."""
220
+ if not should_save:
221
+ return None
222
+ filename = generate_filename(prompt if prompt else content, file_type)
223
+ with open(filename, "w", encoding="utf-8") as f:
224
+ if is_image:
225
+ f.write(content)
226
+ else:
227
+ f.write(prompt + "\n\n" + content if prompt else content)
228
+ return filename
229
+
230
+ def get_download_link(file_path):
231
+ """Create download link for file."""
232
+ with open(file_path, "rb") as file:
233
+ contents = file.read()
234
+ b64 = base64.b64encode(contents).decode()
235
+ return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}📂</a>'
236
+
237
+ @st.cache_resource
238
+ def SpeechSynthesis(result):
239
+ """HTML5 Speech Synthesis."""
240
+ documentHTML5 = f'''
241
+ <!DOCTYPE html>
242
+ <html>
243
+ <head>
244
+ <title>Read It Aloud</title>
245
+ <script type="text/javascript">
246
+ function readAloud() {{
247
+ const text = document.getElementById("textArea").value;
248
+ const speech = new SpeechSynthesisUtterance(text);
249
+ window.speechSynthesis.speak(speech);
250
+ }}
251
+ </script>
252
+ </head>
253
+ <body>
254
+ <h1>🔊 Read It Aloud</h1>
255
+ <textarea id="textArea" rows="10" cols="80">{result}</textarea>
256
+ <br>
257
+ <button onclick="readAloud()">🔊 Read Aloud</button>
258
+ </body>
259
+ </html>
260
+ '''
261
+ components.html(documentHTML5, width=1280, height=300)
262
+
263
+ # Media Processing Functions
264
+ def process_image(image_input, user_prompt):
265
+ """Process image with GPT-4o vision."""
266
+ if isinstance(image_input, str):
267
+ with open(image_input, "rb") as image_file:
268
+ image_input = image_file.read()
269
+
270
+ base64_image = base64.b64encode(image_input).decode("utf-8")
271
+
272
+ response = openai_client.chat.completions.create(
273
+ model=st.session_state["openai_model"],
274
+ messages=[
275
+ {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
276
+ {"role": "user", "content": [
277
+ {"type": "text", "text": user_prompt},
278
+ {"type": "image_url", "image_url": {
279
+ "url": f"data:image/png;base64,{base64_image}"
280
+ }}
281
+ ]}
282
+ ],
283
+ temperature=0.0,
284
+ )
285
+
286
+ return response.choices[0].message.content
287
+
288
+ def process_audio(audio_input, text_input=''):
289
+ """Process audio with Whisper and GPT."""
290
+ if isinstance(audio_input, str):
291
+ with open(audio_input, "rb") as file:
292
+ audio_input = file.read()
293
+
294
+ transcription = openai_client.audio.transcriptions.create(
295
+ model="whisper-1",
296
+ file=audio_input,
297
+ )
298
+
299
+ st.session_state.messages.append({"role": "user", "content": transcription.text})
300
+
301
+ with st.chat_message("assistant"):
302
+ st.markdown(transcription.text)
303
+ SpeechSynthesis(transcription.text)
304
+
305
+ filename = generate_filename(transcription.text, "wav")
306
+ create_and_save_file(audio_input, "wav", transcription.text, True)
307
+
308
+ def process_video(video_path, seconds_per_frame=1):
309
+ """Process video files for frame extraction and audio."""
310
+ base64Frames = []
311
+ video = cv2.VideoCapture(video_path)
312
+ total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
313
+ fps = video.get(cv2.CAP_PROP_FPS)
314
+ frames_to_skip = int(fps * seconds_per_frame)
315
+
316
+ for frame_idx in range(0, total_frames, frames_to_skip):
317
+ video.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
318
+ success, frame = video.read()
319
+ if not success:
320
+ break
321
+ _, buffer = cv2.imencode(".jpg", frame)
322
+ base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
323
+
324
+ video.release()
325
+
326
+ # Extract audio
327
+ base_video_path = os.path.splitext(video_path)[0]
328
+ audio_path = f"{base_video_path}.mp3"
329
+ try:
330
+ video_clip = VideoFileClip(video_path)
331
+ video_clip.audio.write_audiofile(audio_path)
332
+ video_clip.close()
333
+ except Exception:
334
+ st.warning("No audio track found in video")
335
+ audio_path = None
336
+
337
+ return base64Frames, audio_path
338
+
339
+ def process_video_with_gpt(video_input, user_prompt):
340
+ """Process video with GPT-4o vision."""
341
+ base64Frames, audio_path = process_video(video_input)
342
+
343
+ response = openai_client.chat.completions.create(
344
+ model=st.session_state["openai_model"],
345
+ messages=[
346
+ {"role": "system", "content": "Analyze the video frames and provide a detailed description."},
347
+ {"role": "user", "content": [
348
+ {"type": "text", "text": user_prompt},
349
+ *[{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{frame}"}}
350
+ for frame in base64Frames]
351
+ ]}
352
+ ]
353
+ )
354
+
355
+ return response.choices[0].message.content
356
+
357
+
358
+ def extract_urls(text):
359
+ try:
360
+ date_pattern = re.compile(r'### (\d{2} \w{3} \d{4})')
361
+ abs_link_pattern = re.compile(r'\[(.*?)\]\((https://arxiv\.org/abs/\d+\.\d+)\)')
362
+ pdf_link_pattern = re.compile(r'\[⬇️\]\((https://arxiv\.org/pdf/\d+\.\d+)\)')
363
+ title_pattern = re.compile(r'### \d{2} \w{3} \d{4} \| \[(.*?)\]')
364
+ date_matches = date_pattern.findall(text)
365
+ abs_link_matches = abs_link_pattern.findall(text)
366
+ pdf_link_matches = pdf_link_pattern.findall(text)
367
+ title_matches = title_pattern.findall(text)
368
+
369
+ # Build markdown output from the extracted fields
370
+ markdown_text = ""
371
+ for i in range(len(date_matches)):
372
+ date = date_matches[i]
373
+ title = title_matches[i]
374
+ abs_link = abs_link_matches[i][1]
375
+ pdf_link = pdf_link_matches[i]
376
+ markdown_text += f"**Date:** {date}\n\n"
377
+ markdown_text += f"**Title:** {title}\n\n"
378
+ markdown_text += f"**Abstract Link:** [{abs_link}]({abs_link})\n\n"
379
+ markdown_text += f"**PDF Link:** [{pdf_link}]({pdf_link})\n\n"
380
+ markdown_text += "---\n\n"
381
+ return markdown_text
382
+
383
+ except Exception:
384
+ st.write('.')
385
+ return ''
386
+
387
+
388
+ def search_arxiv(query):
389
+
390
+ st.write("Performing AI Lookup...")
391
+ client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
392
+
393
+ result1 = client.predict(
394
+ prompt=query,
395
+ llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1",
396
+ stream_outputs=True,
397
+ api_name="/ask_llm"
398
+ )
399
+ st.markdown("### Mixtral-8x7B-Instruct-v0.1 Result")
400
+ st.markdown(result1)
401
+
402
+ result2 = client.predict(
403
+ prompt=query,
404
+ llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
405
+ stream_outputs=True,
406
+ api_name="/ask_llm"
407
+ )
408
+ st.markdown("### Mistral-7B-Instruct-v0.2 Result")
409
+ st.markdown(result2)
410
+ combined_result = f"{result1}\n\n{result2}"
411
+ return combined_result
412
+
413
+ #return responseall
414
+
415
+
416
+ # Function to generate a filename based on prompt and time (because names matter 🕒)
417
+ def generate_filename(prompt, file_type):
418
+ central = pytz.timezone('US/Central')
419
+ safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
420
+ safe_prompt = re.sub(r'\W+', '_', prompt)[:90]
421
+ return f"{safe_date_time}_{safe_prompt}.{file_type}"
422
+
423
+ # Function to create and save a file (and avoid the black hole of lost data 🕳)
424
+ def create_file(filename, prompt, response):
425
+ with open(filename, 'w', encoding='utf-8') as file:
426
+ file.write(prompt + "\n\n" + response)
427
+
428
+
429
+ def perform_ai_lookup(query):
430
+ start_time = time.strftime("%Y-%m-%d %H:%M:%S")
431
+ client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
432
+ response1 = client.predict(
433
+ query,
434
+ 20,
435
+ "Semantic Search",
436
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
437
+ api_name="/update_with_rag_md"
438
+ )
439
+ Question = '### 🔎 ' + query + '\r\n' # Format for markdown display with links
440
+ References = response1[0]
441
+ ReferenceLinks = extract_urls(References)
442
+
443
+ RunSecondQuery = True
444
+ results=''
445
+ if RunSecondQuery:
446
+ # Search 2 - Retrieve the Summary with Papers Context and Original Query
447
+ response2 = client.predict(
448
+ query,
449
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
450
+ True,
451
+ api_name="/ask_llm"
452
+ )
453
+ if len(response2) > 10:
454
+ Answer = response2
455
+ SpeechSynthesis(Answer)
456
+ # Restructure results to follow format of Question, Answer, References, ReferenceLinks
457
+ results = Question + '\r\n' + Answer + '\r\n' + References + '\r\n' + ReferenceLinks
458
+ st.markdown(results)
459
+
460
+ st.write('πŸ”Run of Multi-Agent System Paper Summary Spec is Complete')
461
+ end_time = time.strftime("%Y-%m-%d %H:%M:%S")
462
+ start_timestamp = time.mktime(time.strptime(start_time, "%Y-%m-%d %H:%M:%S"))
463
+ end_timestamp = time.mktime(time.strptime(end_time, "%Y-%m-%d %H:%M:%S"))
464
+ elapsed_seconds = end_timestamp - start_timestamp
465
+ st.write(f"Start time: {start_time}")
466
+ st.write(f"Finish time: {end_time}")
467
+ st.write(f"Elapsed time: {elapsed_seconds:.2f} seconds")
468
+
469
+
470
+ filename = generate_filename(query, "md")
471
+ create_file(filename, query, results)
472
+ return results
473
+
474
+ # Chat Processing Functions
475
+ def process_with_gpt(text_input):
476
+ """Process text with GPT-4o."""
477
+ if text_input:
478
+ st.session_state.messages.append({"role": "user", "content": text_input})
479
+
480
+ with st.chat_message("user"):
481
+ st.markdown(text_input)
482
+
483
+ with st.chat_message("assistant"):
484
+ completion = openai_client.chat.completions.create(
485
+ model=st.session_state["openai_model"],
486
+ messages=[
487
+ {"role": m["role"], "content": m["content"]}
488
+ for m in st.session_state.messages
489
+ ],
490
+ stream=False
491
+ )
492
+ return_text = completion.choices[0].message.content
493
+ st.write("GPT-4o: " + return_text)
494
+
495
+ #filename = generate_filename(text_input, "md")
496
+ filename = generate_filename("GPT-4o: " + return_text, "md")
497
+ create_file(filename, text_input, return_text)
498
+ st.session_state.messages.append({"role": "assistant", "content": return_text})
499
+ return return_text
500
+
501
+ def process_with_claude(text_input):
502
+ """Process text with Claude."""
503
+ if text_input:
504
+
505
+ with st.chat_message("user"):
506
+ st.markdown(text_input)
507
+
508
+ with st.chat_message("assistant"):
509
+ response = claude_client.messages.create(
510
+ model="claude-3-sonnet-20240229",
511
+ max_tokens=1000,
512
+ messages=[
513
+ {"role": "user", "content": text_input}
514
+ ]
515
+ )
516
+ response_text = response.content[0].text
517
+ st.write("Claude: " + response_text)
518
+
519
+ #filename = generate_filename(text_input, "md")
520
+ filename = generate_filename("Claude: " + response_text, "md")
521
+ create_file(filename, text_input, response_text)
522
+
523
+ st.session_state.chat_history.append({
524
+ "user": text_input,
525
+ "claude": response_text
526
+ })
527
+ return response_text
528
+
529
+ # File Management Functions
530
+ def load_file(file_name):
531
+ """Load file content."""
532
+ with open(file_name, "r", encoding='utf-8') as file:
533
+ content = file.read()
534
+ return content
535
+
536
+ def create_zip_of_files(files):
537
+ """Create zip archive of files."""
538
+ zip_name = "all_files.zip"
539
+ with zipfile.ZipFile(zip_name, 'w') as zipf:
540
+ for file in files:
541
+ zipf.write(file)
542
+ return zip_name
543
+
544
+
545
+
546
+ def get_media_html(media_path, media_type="video", width="100%"):
547
+ """Generate HTML for media player."""
548
+ media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
549
+ if media_type == "video":
550
+ return f'''
551
+ <video width="{width}" controls autoplay muted loop>
552
+ <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
553
+ Your browser does not support the video tag.
554
+ </video>
555
+ '''
556
+ else: # audio
557
+ return f'''
558
+ <audio controls style="width: {width};">
559
+ <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
560
+ Your browser does not support the audio element.
561
+ </audio>
562
+ '''
563
+
564
+ def create_media_gallery():
565
+ """Create the media gallery interface."""
566
+ st.header("🎬 Media Gallery")
567
+
568
+ tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video", "🎨 Scene Generator"])
569
+
570
+ with tabs[0]:
571
+ image_files = glob.glob("*.png") + glob.glob("*.jpg")
572
+ if image_files:
573
+ num_cols = st.slider("Number of columns", 1, 5, 3)
574
+ cols = st.columns(num_cols)
575
+ for idx, image_file in enumerate(image_files):
576
+ with cols[idx % num_cols]:
577
+ img = Image.open(image_file)
578
+ st.image(img, use_container_width=True)
579
+
580
+ # Add GPT vision analysis option
581
+ if st.button(f"Analyze {os.path.basename(image_file)}"):
582
+ analysis = process_image(image_file,
583
+ "Describe this image in detail and identify key elements.")
584
+ st.markdown(analysis)
585
+
586
+ with tabs[1]:
587
+ audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
588
+ for audio_file in audio_files:
589
+ with st.expander(f"🎡 {os.path.basename(audio_file)}"):
590
+ st.markdown(get_media_html(audio_file, "audio"), unsafe_allow_html=True)
591
+ if st.button(f"Transcribe {os.path.basename(audio_file)}"):
592
+ with open(audio_file, "rb") as f:
593
+ transcription = process_audio(f)
594
+ st.write(transcription)
595
+
596
+ with tabs[2]:
597
+ video_files = glob.glob("*.mp4")
598
+ for video_file in video_files:
599
+ with st.expander(f"πŸŽ₯ {os.path.basename(video_file)}"):
600
+ st.markdown(get_media_html(video_file, "video"), unsafe_allow_html=True)
601
+ if st.button(f"Analyze {os.path.basename(video_file)}"):
602
+ analysis = process_video_with_gpt(video_file,
603
+ "Describe what's happening in this video.")
604
+ st.markdown(analysis)
605
+
606
+ with tabs[3]:
607
+ for collection_name, bikes in bike_collections.items():
608
+ st.subheader(collection_name)
609
+ cols = st.columns(len(bikes))
610
+
611
+ for idx, (bike_name, details) in enumerate(bikes.items()):
612
+ with cols[idx]:
613
+ st.markdown(f"""
614
+ <div class='bike-card'>
615
+ <h3>{details['emoji']} {bike_name}</h3>
616
+ <p>{details['prompt']}</p>
617
+ </div>
618
+ """, unsafe_allow_html=True)
619
+
620
+ if st.button(f"Generate {bike_name} Scene"):
621
+ prompt = details['prompt']
622
+ # Here you could integrate with image generation API
623
+ st.write(f"Generated scene description for {bike_name}:")
624
+ st.write(prompt)
625
+
626
+ def display_file_manager():
627
+ """Display file management sidebar."""
628
+ st.sidebar.title("πŸ“ File Management")
629
+
630
+ all_files = glob.glob("*.md")
631
+ all_files.sort(reverse=True)
632
+
633
+ if st.sidebar.button("πŸ—‘ Delete All"):
634
+ for file in all_files:
635
+ os.remove(file)
636
+ st.rerun()
637
+
638
+ if st.sidebar.button("⬇️ Download All"):
639
+ zip_file = create_zip_of_files(all_files)
640
+ st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)
641
+
642
+ for file in all_files:
643
+ col1, col2, col3, col4 = st.sidebar.columns([1,3,1,1])
644
+ with col1:
645
+ if st.button("🌐", key="view_"+file):
646
+ st.session_state.current_file = file
647
+ st.session_state.file_content = load_file(file)
648
+ with col2:
649
+ st.markdown(get_download_link(file), unsafe_allow_html=True)
650
+ with col3:
651
+ if st.button("πŸ“‚", key="edit_"+file):
652
+ st.session_state.current_file = file
653
+ st.session_state.file_content = load_file(file)
654
+ with col4:
655
+ if st.button("πŸ—‘", key="delete_"+file):
656
+ os.remove(file)
657
+ st.rerun()
658
+
659
+ def main():
660
+ st.sidebar.markdown("### 🚲BikeAIπŸ† Claude and GPT Multi-Agent Research AI")
661
+
662
+ # Main navigation
663
+ tab_main = st.radio("Choose Action:",
664
+ ["πŸ’¬ Chat", "πŸ“Έ Media Gallery", "πŸ” Search ArXiv", "πŸ“ File Editor"],
665
+ horizontal=True)
666
+
667
+ if tab_main == "πŸ’¬ Chat":
668
+ # Model Selection
669
+ model_choice = st.sidebar.radio(
670
+ "Choose AI Model:",
671
+ ["GPT-4o", "Claude-3", "GPT+Claude+Arxiv"]
672
+ )
673
+
674
+ # Chat Interface
675
+ user_input = st.text_area("Message:", height=100)
676
+
677
+ if st.button("Send πŸ“¨"):
678
+ if user_input:
679
+ if model_choice == "GPT-4o":
680
+ gpt_response = process_with_gpt(user_input)
681
+ elif model_choice == "Claude-3":
682
+ claude_response = process_with_claude(user_input)
683
+ else: # Both
684
+ col1, col2, col3 = st.columns(3)
685
+ with col2:
686
+ st.subheader("Claude-3.5 Sonnet:")
687
+ try:
688
+ claude_response = process_with_claude(user_input)
689
+ except Exception:
690
+ st.write('Claude 3 Sonnet out of tokens.')
691
+ with col1:
692
+ st.subheader("GPT-4o Omni:")
693
+ try:
694
+ gpt_response = process_with_gpt(user_input)
695
+ except Exception:
696
+ st.write('GPT-4o out of tokens.')
697
+ with col3:
698
+ st.subheader("Arxiv and Mistral Research:")
699
+ with st.spinner("Searching ArXiv..."):
700
+ #results = search_arxiv(user_input)
701
+ results = perform_ai_lookup(user_input)
702
+
703
+ st.markdown(results)
704
+
705
+ # Display Chat History
706
+ st.subheader("Chat History πŸ“œ")
707
+ tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])
708
+
709
+ with tab1:
710
+ for chat in st.session_state.chat_history:
711
+ st.text_area("You:", chat["user"], height=100)
712
+ st.text_area("Claude:", chat["claude"], height=200)
713
+ st.markdown(chat["claude"])
714
+
715
+ with tab2:
716
+ for message in st.session_state.messages:
717
+ with st.chat_message(message["role"]):
718
+ st.markdown(message["content"])
719
+
720
+ elif tab_main == "πŸ“Έ Media Gallery":
721
+ create_media_gallery()
722
+
723
+ elif tab_main == "πŸ” Search ArXiv":
724
+ query = st.text_input("Enter your research query:")
725
+ if query:
726
+ with st.spinner("Searching ArXiv..."):
727
+ results = search_arxiv(query)
728
+ st.markdown(results)
729
+
730
+ elif tab_main == "πŸ“ File Editor":
731
+ if hasattr(st.session_state, 'current_file'):
732
+ st.subheader(f"Editing: {st.session_state.current_file}")
733
+ new_content = st.text_area("Content:", st.session_state.file_content, height=300)
734
+ if st.button("Save Changes"):
735
+ with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
736
+ file.write(new_content)
737
+ st.success("File updated successfully!")
738
+
739
+ # Always show file manager in sidebar
740
+ display_file_manager()
741
+
742
+ if __name__ == "__main__":
743
+ main()
744
+
745
  # Speech Recognition HTML Component
746
  speech_recognition_html = """
747
  <!DOCTYPE html>