awacke1 committed on
Commit c659a35 • 1 Parent(s): af81713

Create backup6.app.py

Files changed (1)
  1. backup6.app.py +622 -0
backup6.app.py ADDED
@@ -0,0 +1,622 @@
+ import streamlit as st
+ import anthropic
+ import openai
+ import base64
+ from datetime import datetime
+ import plotly.graph_objects as go
+ import cv2
+ import glob
+ import json
+ import math
+ import os
+ import pytz
+ import random
+ import re
+ import requests
+ import streamlit.components.v1 as components
+ import textract
+ import time
+ import zipfile
+ from audio_recorder_streamlit import audio_recorder
+ from bs4 import BeautifulSoup
+ from collections import deque
+ from dotenv import load_dotenv
+ from gradio_client import Client, handle_file
+ from huggingface_hub import InferenceClient
+ from io import BytesIO
+ from moviepy.editor import VideoFileClip
+ from PIL import Image
+ from PyPDF2 import PdfReader
+ from urllib.parse import quote
+ from xml.etree import ElementTree as ET
+ from openai import OpenAI
+
+ # 1. Configuration and Setup
+ Site_Name = '🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI'
+ title = "🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI"
+ helpURL = 'https://huggingface.co/awacke1'
+ bugURL = 'https://huggingface.co/spaces/awacke1'
+ icons = '🚲🏆'
+
+ st.set_page_config(
+     page_title=title,
+     page_icon=icons,
+     layout="wide",
+     initial_sidebar_state="auto",
+     menu_items={
+         'Get Help': helpURL,
+         'Report a bug': bugURL,
+         'About': title
+     }
+ )
+
+ # 2. Load environment variables and initialize clients
+ load_dotenv()
+
+ # OpenAI setup
+ openai.api_key = os.getenv('OPENAI_API_KEY')
+ if openai.api_key is None:
+     openai.api_key = st.secrets['OPENAI_API_KEY']
+
+ openai_client = OpenAI(
+     api_key=os.getenv('OPENAI_API_KEY'),
+     organization=os.getenv('OPENAI_ORG_ID')
+ )
+
+ # Claude setup
+ anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
+ if anthropic_key is None:
+     anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
+ claude_client = anthropic.Anthropic(api_key=anthropic_key)
+
+ # HuggingFace setup
+ API_URL = os.getenv('API_URL')
+ HF_KEY = os.getenv('HF_KEY')
+ MODEL1 = "meta-llama/Llama-2-7b-chat-hf"
+ MODEL2 = "openai/whisper-small.en"
+
+ headers = {
+     "Authorization": f"Bearer {HF_KEY}",
+     "Content-Type": "application/json"
+ }
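+
+ # Note: the API keys above fall back to Streamlit secrets when the environment
+ # variables are unset; API_URL, HF_KEY, MODEL1, MODEL2 and headers are defined for
+ # Hugging Face Inference API calls but do not appear to be referenced again in this file.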
+
+ # Initialize session states
+ if "chat_history" not in st.session_state:
+     st.session_state.chat_history = []
+ if "openai_model" not in st.session_state:
+     st.session_state["openai_model"] = "gpt-4o-2024-05-13"
+ if "messages" not in st.session_state:
+     st.session_state.messages = []
+
+ # Custom CSS
+ st.markdown("""
+     <style>
+     .main {
+         background: linear-gradient(to right, #1a1a1a, #2d2d2d);
+         color: #ffffff;
+     }
+     .stMarkdown {
+         font-family: 'Helvetica Neue', sans-serif;
+     }
+     .category-header {
+         background: linear-gradient(45deg, #2b5876, #4e4376);
+         padding: 20px;
+         border-radius: 10px;
+         margin: 10px 0;
+     }
+     .scene-card {
+         background: rgba(0,0,0,0.3);
+         padding: 15px;
+         border-radius: 8px;
+         margin: 10px 0;
+         border: 1px solid rgba(255,255,255,0.1);
+     }
+     .media-gallery {
+         display: grid;
+         gap: 1rem;
+         padding: 1rem;
+     }
+     .bike-card {
+         background: rgba(255,255,255,0.05);
+         border-radius: 10px;
+         padding: 15px;
+         transition: transform 0.3s;
+     }
+     .bike-card:hover {
+         transform: scale(1.02);
+     }
+     </style>
+ """, unsafe_allow_html=True)
+
+ # Bike Collections
+ bike_collections = {
+     "Celestial Collection 🌌": {
+         "Eclipse Vaulter": {
+             "prompt": """Cinematic shot of a sleek black mountain bike silhouetted against a total solar eclipse.
+             The corona creates an ethereal halo effect, with lens flares accentuating key points of the frame.
+             Dynamic composition shows the bike mid-leap, with stardust particles trailing behind.
+             Camera angle: Low angle, wide shot
+             Lighting: Dramatic rim lighting from eclipse
+             Color palette: Deep purples, cosmic blues, corona gold""",
+             "emoji": "🌑"
+         },
+         "Starlight Leaper": {
+             "prompt": """A black bike performing an epic leap under a vast Milky Way galaxy.
+             Shimmering stars blanket the sky while the bike's wheels leave a trail of stardust.
+             Camera angle: Wide-angle upward shot
+             Lighting: Natural starlight with subtle rim lighting
+             Color palette: Deep blues, silver highlights, cosmic purples""",
+             "emoji": "✨"
+         },
+         "Moonlit Hopper": {
+             "prompt": """A sleek black bike mid-hop over a moonlit meadow,
+             the full moon illuminating the misty surroundings. Fireflies dance around the bike,
+             and soft shadows create a serene yet dynamic atmosphere.
+             Camera angle: Side profile with slight low angle
+             Lighting: Soft moonlight with atmospheric fog
+             Color palette: Silver blues, soft whites, deep shadows""",
+             "emoji": "🌙"
+         }
+     },
+     "Nature-Inspired Collection 🌲": {
+         "Shadow Grasshopper": {
+             "prompt": """A black bike jumping between forest paths,
+             with dappled sunlight streaming through the canopy. Shadows dance on the bike's frame
+             as it soars above mossy logs.
+             Camera angle: Through-the-trees tracking shot
+             Lighting: Natural forest lighting with sun rays
+             Color palette: Forest greens, golden sunlight, deep shadows""",
+             "emoji": "🦗"
+         },
+         "Onyx Leapfrog": {
+             "prompt": """A bike with obsidian-black finish jumping over a sparkling creek,
+             the reflection on the water broken into ripples by the leap. The surrounding forest
+             is vibrant with greens and browns.
+             Camera angle: Low angle from water level
+             Lighting: Golden hour side lighting
+             Color palette: Deep blacks, water blues, forest greens""",
+             "emoji": "🐸"
+         }
+     }
+ }
+
+ # Helper Functions
+ def generate_filename(prompt, file_type):
+     """Generate a safe filename using the prompt and file type."""
+     central = pytz.timezone('US/Central')
+     safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
+     replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
+     safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
+     return f"{safe_date_time}_{safe_prompt}.{file_type}"
+
+
+ # Function to create and save a file (and avoid the black hole of lost data 🕳)
+ def create_file(filename, prompt, response, should_save=True):
+     if not should_save:
+         return
+     with open(filename, 'w', encoding='utf-8') as file:
+         file.write(prompt + "\n\n" + response)
+
+
+ def create_and_save_file(content, file_type="md", prompt=None, is_image=False, should_save=True):
+     """Create and save file with proper handling of different types."""
+     if not should_save:
+         return None
+     filename = generate_filename(prompt if prompt else content, file_type)
+     with open(filename, "w", encoding="utf-8") as f:
+         if is_image:
+             f.write(content)
+         else:
+             f.write(prompt + "\n\n" + content if prompt else content)
+     return filename
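+
+ # Note: create_and_save_file writes in text mode in both branches, so callers are
+ # expected to pass string content; raw binary data (e.g. image or audio bytes)
+ # would need a separate binary write path.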
+
+ def get_download_link(file_path):
+     """Create download link for file."""
+     with open(file_path, "rb") as file:
+         contents = file.read()
+     b64 = base64.b64encode(contents).decode()
+     return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}📂</a>'
+
+ @st.cache_resource
+ def SpeechSynthesis(result):
+     """HTML5 Speech Synthesis."""
+     documentHTML5 = f'''
+     <!DOCTYPE html>
+     <html>
+     <head>
+         <title>Read It Aloud</title>
+         <script type="text/javascript">
+             function readAloud() {{
+                 const text = document.getElementById("textArea").value;
+                 const speech = new SpeechSynthesisUtterance(text);
+                 window.speechSynthesis.speak(speech);
+             }}
+         </script>
+     </head>
+     <body>
+         <h1>🔊 Read It Aloud</h1>
+         <textarea id="textArea" rows="10" cols="80">{result}</textarea>
+         <br>
+         <button onclick="readAloud()">🔊 Read Aloud</button>
+     </body>
+     </html>
+     '''
+     components.html(documentHTML5, width=1280, height=300)
+
+ # Media Processing Functions
+ def process_image(image_input, user_prompt):
+     """Process image with GPT-4o vision."""
+     if isinstance(image_input, str):
+         with open(image_input, "rb") as image_file:
+             image_input = image_file.read()
+
+     base64_image = base64.b64encode(image_input).decode("utf-8")
+
+     response = openai_client.chat.completions.create(
+         model=st.session_state["openai_model"],
+         messages=[
+             {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
+             {"role": "user", "content": [
+                 {"type": "text", "text": user_prompt},
+                 {"type": "image_url", "image_url": {
+                     "url": f"data:image/png;base64,{base64_image}"
+                 }}
+             ]}
+         ],
+         temperature=0.0,
+     )
+
+     return response.choices[0].message.content
+
+ def process_audio(audio_input, text_input=''):
+     """Process audio with Whisper and append the transcription to the chat."""
+     # Accept either a file path or an already-open file object.
+     if isinstance(audio_input, str):
+         with open(audio_input, "rb") as file:
+             audio_input = (os.path.basename(audio_input), file.read())
+
+     transcription = openai_client.audio.transcriptions.create(
+         model="whisper-1",
+         file=audio_input,
+     )
+
+     st.session_state.messages.append({"role": "user", "content": transcription.text})
+
+     with st.chat_message("assistant"):
+         st.markdown(transcription.text)
+         SpeechSynthesis(transcription.text)
+
+     # Save the transcript text (not the raw audio bytes, which create_file cannot write).
+     filename = generate_filename(transcription.text, "md")
+     create_file(filename, text_input, transcription.text)
+     return transcription.text
+
+ def process_video(video_path, seconds_per_frame=1):
+     """Process video files for frame extraction and audio."""
+     base64Frames = []
+     video = cv2.VideoCapture(video_path)
+     total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
+     fps = video.get(cv2.CAP_PROP_FPS)
+     frames_to_skip = int(fps * seconds_per_frame)
+
+     for frame_idx in range(0, total_frames, frames_to_skip):
+         video.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
+         success, frame = video.read()
+         if not success:
+             break
+         _, buffer = cv2.imencode(".jpg", frame)
+         base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
+
+     video.release()
+
+     # Extract audio
+     base_video_path = os.path.splitext(video_path)[0]
+     audio_path = f"{base_video_path}.mp3"
+     try:
+         video_clip = VideoFileClip(video_path)
+         video_clip.audio.write_audiofile(audio_path)
+         video_clip.close()
+     except:
+         st.warning("No audio track found in video")
+         audio_path = None
+
+     return base64Frames, audio_path
+
+ def process_video_with_gpt(video_input, user_prompt):
+     """Process video with GPT-4o vision."""
+     base64Frames, audio_path = process_video(video_input)
+
+     response = openai_client.chat.completions.create(
+         model=st.session_state["openai_model"],
+         messages=[
+             {"role": "system", "content": "Analyze the video frames and provide a detailed description."},
+             {"role": "user", "content": [
+                 {"type": "text", "text": user_prompt},
+                 *[{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{frame}"}}
+                   for frame in base64Frames]
+             ]}
+         ]
+     )
+
+     return response.choices[0].message.content
+
+ # ArXiv Search Functions
+ def search_arxiv(query):
+     """Search ArXiv papers using Hugging Face client."""
+     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+     response = client.predict(
+         query,
+         "mistralai/Mixtral-8x7B-Instruct-v0.1",
+         True,
+         api_name="/ask_llm"
+     )
+     return response
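+
+ # Note: search_arxiv depends on the remote Gradio Space
+ # "awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern" exposing an "/ask_llm" endpoint
+ # with this argument order; if that Space changes its API, this call needs updating.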
+
+ # Chat Processing Functions
+ def process_with_gpt(text_input):
+     """Process text with GPT-4o."""
+     if text_input:
+         st.session_state.messages.append({"role": "user", "content": text_input})
+
+         with st.chat_message("user"):
+             st.markdown(text_input)
+
+         with st.chat_message("assistant"):
+             completion = openai_client.chat.completions.create(
+                 model=st.session_state["openai_model"],
+                 messages=[
+                     {"role": m["role"], "content": m["content"]}
+                     for m in st.session_state.messages
+                 ],
+                 stream=False
+             )
+             return_text = completion.choices[0].message.content
+             st.write("GPT-4o: " + return_text)
+
+             #filename = generate_filename(text_input, "md")
+             filename = generate_filename("GPT-4o: " + return_text, "md")
+             create_file(filename, text_input, return_text)
+             st.session_state.messages.append({"role": "assistant", "content": return_text})
+         return return_text
+
+ def process_with_claude(text_input):
+     """Process text with Claude."""
+     if text_input:
+
+         with st.chat_message("user"):
+             st.markdown(text_input)
+
+         with st.chat_message("assistant"):
+             response = claude_client.messages.create(
+                 model="claude-3-sonnet-20240229",
+                 max_tokens=1000,
+                 messages=[
+                     {"role": "user", "content": text_input}
+                 ]
+             )
+             response_text = response.content[0].text
+             st.write("Claude: " + response_text)
+
+             #filename = generate_filename(text_input, "md")
+             filename = generate_filename("Claude: " + response_text, "md")
+             create_file(filename, text_input, response_text)
+
+             st.session_state.chat_history.append({
+                 "user": text_input,
+                 "claude": response_text
+             })
+         return response_text
+
+ # File Management Functions
+ def load_file(file_name):
+     """Load file content."""
+     with open(file_name, "r", encoding='utf-8') as file:
+         content = file.read()
+     return content
+
+ def create_zip_of_files(files):
+     """Create zip archive of files."""
+     zip_name = "all_files.zip"
+     with zipfile.ZipFile(zip_name, 'w') as zipf:
+         for file in files:
+             zipf.write(file)
+     return zip_name
+
+
+ def get_media_html(media_path, media_type="video", width="100%"):
+     """Generate HTML for media player."""
+     media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
+     if media_type == "video":
+         return f'''
+         <video width="{width}" controls autoplay muted loop>
+             <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
+             Your browser does not support the video tag.
+         </video>
+         '''
+     else:  # audio
+         return f'''
+         <audio controls style="width: {width};">
+             <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
+             Your browser does not support the audio element.
+         </audio>
+         '''
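+
+ # Note: get_media_html inlines the entire file as a base64 data URI, which keeps the
+ # gallery self-contained but can make pages heavy for large audio or video files.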
+
+ def create_media_gallery():
+     """Create the media gallery interface."""
+     st.header("🎬 Media Gallery")
+
+     tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video", "🎨 Scene Generator"])
+
+     with tabs[0]:
+         image_files = glob.glob("*.png") + glob.glob("*.jpg")
+         if image_files:
+             num_cols = st.slider("Number of columns", 1, 5, 3)
+             cols = st.columns(num_cols)
+             for idx, image_file in enumerate(image_files):
+                 with cols[idx % num_cols]:
+                     img = Image.open(image_file)
+                     st.image(img, use_container_width=True)
+
+                     # Add GPT vision analysis option
+                     if st.button(f"Analyze {os.path.basename(image_file)}"):
+                         analysis = process_image(image_file,
+                                                  "Describe this image in detail and identify key elements.")
+                         st.markdown(analysis)
+
+     with tabs[1]:
+         audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
+         for audio_file in audio_files:
+             with st.expander(f"🎵 {os.path.basename(audio_file)}"):
+                 st.markdown(get_media_html(audio_file, "audio"), unsafe_allow_html=True)
+                 if st.button(f"Transcribe {os.path.basename(audio_file)}"):
+                     with open(audio_file, "rb") as f:
+                         transcription = process_audio(f)
+                         st.write(transcription)
+
+     with tabs[2]:
+         video_files = glob.glob("*.mp4")
+         for video_file in video_files:
+             with st.expander(f"🎥 {os.path.basename(video_file)}"):
+                 st.markdown(get_media_html(video_file, "video"), unsafe_allow_html=True)
+                 if st.button(f"Analyze {os.path.basename(video_file)}"):
+                     analysis = process_video_with_gpt(video_file,
+                                                       "Describe what's happening in this video.")
+                     st.markdown(analysis)
+
+     with tabs[3]:
+         for collection_name, bikes in bike_collections.items():
+             st.subheader(collection_name)
+             cols = st.columns(len(bikes))
+
+             for idx, (bike_name, details) in enumerate(bikes.items()):
+                 with cols[idx]:
+                     st.markdown(f"""
+                     <div class='bike-card'>
+                         <h3>{details['emoji']} {bike_name}</h3>
+                         <p>{details['prompt']}</p>
+                     </div>
+                     """, unsafe_allow_html=True)
+
+                     if st.button(f"Generate {bike_name} Scene"):
+                         prompt = details['prompt']
+                         # Here you could integrate with image generation API
+                         st.write(f"Generated scene description for {bike_name}:")
+                         st.write(prompt)
+
+ def display_file_manager():
+     """Display file management sidebar."""
+     st.sidebar.title("📁 File Management")
+
+     all_files = glob.glob("*.md")
+     all_files.sort(reverse=True)
+
+     if st.sidebar.button("🗑 Delete All"):
+         for file in all_files:
+             os.remove(file)
+         st.rerun()
+
+     if st.sidebar.button("⬇️ Download All"):
+         zip_file = create_zip_of_files(all_files)
+         st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)
+
+     for file in all_files:
+         col1, col2, col3, col4 = st.sidebar.columns([1,3,1,1])
+         with col1:
+             if st.button("🌐", key="view_"+file):
+                 st.session_state.current_file = file
+                 st.session_state.file_content = load_file(file)
+         with col2:
+             st.markdown(get_download_link(file), unsafe_allow_html=True)
+         with col3:
+             if st.button("📂", key="edit_"+file):
+                 st.session_state.current_file = file
+                 st.session_state.file_content = load_file(file)
+         with col4:
+             if st.button("🗑", key="delete_"+file):
+                 os.remove(file)
+                 st.rerun()
+
+ def main():
+     st.title("🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI")
+
+     # Main navigation
+     tab_main = st.radio("Choose Action:",
+                         ["💬 Chat", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
+                         horizontal=True)
+
+     if tab_main == "💬 Chat":
+         # Model Selection
+         model_choice = st.sidebar.radio(
+             "Choose AI Model:",
+             ["GPT-4o", "Claude-3", "Both"]
+         )
+
+         # Chat Interface
+         user_input = st.text_area("Message:", height=100)
+
+         if st.button("Send 📨"):
+             if user_input:
+                 if model_choice == "GPT-4o":
+                     gpt_response = process_with_gpt(user_input)
+                 elif model_choice == "Claude-3":
+                     claude_response = process_with_claude(user_input)
+                 else:  # Both
+                     col1, col2, col3 = st.columns(3)
+                     with col2:
+                         st.subheader("Claude-3.5 Sonnet:")
+                         try:
+                             claude_response = process_with_claude(user_input)
+                         except:
+                             st.write('Claude 3.5 Sonnet out of tokens.')
+                     with col1:
+                         st.subheader("GPT-4o Omni:")
+                         try:
+                             gpt_response = process_with_gpt(user_input)
+                         except:
+                             st.write('GPT 4o out of tokens')
+                     with col3:
+                         st.subheader("Arxiv and Mistral Research:")
+                         with st.spinner("Searching ArXiv..."):
+                             results = search_arxiv(user_input)
+                             st.markdown(results)
+
+         # Display Chat History
+         st.subheader("Chat History 📜")
+         tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])
+
+         with tab1:
+             for chat in st.session_state.chat_history:
+                 st.text_area("You:", chat["user"], height=100, disabled=True)
+                 st.text_area("Claude:", chat["claude"], height=200, disabled=True)
+                 st.markdown("---")
+
+         with tab2:
+             for message in st.session_state.messages:
+                 with st.chat_message(message["role"]):
+                     st.markdown(message["content"])
+
+     elif tab_main == "📸 Media Gallery":
+         create_media_gallery()
+
+     elif tab_main == "🔍 Search ArXiv":
+         query = st.text_input("Enter your research query:")
+         if query:
+             with st.spinner("Searching ArXiv..."):
+                 results = search_arxiv(query)
+                 st.markdown(results)
+
+     elif tab_main == "📝 File Editor":
+         if hasattr(st.session_state, 'current_file'):
+             st.subheader(f"Editing: {st.session_state.current_file}")
+             new_content = st.text_area("Content:", st.session_state.file_content, height=300)
+             if st.button("Save Changes"):
+                 with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
+                     file.write(new_content)
+                 st.success("File updated successfully!")
+
+     # Always show file manager in sidebar
+     display_file_manager()
+
+ if __name__ == "__main__":
+     main()