awacke1 committed
Commit 643516d · verified · 1 Parent(s): 17fd422

Update app.py

Files changed (1)
  1. app.py +344 -113
app.py CHANGED
@@ -1,36 +1,94 @@
1
  import streamlit as st
2
  import base64
3
  from datetime import datetime
4
  import plotly.graph_objects as go
5
  import cv2
6
  import os
7
  import pytz
8
  import random
9
  import re
10
  import requests
11
- from moviepy.editor import VideoFileClip
12
- from PIL import Image
13
- import glob
 
14
  from audio_recorder_streamlit import audio_recorder
15
- import json
16
- from openai import OpenAI
17
  from dotenv import load_dotenv
 
18
  from huggingface_hub import InferenceClient
19
- from bs4 import BeautifulSoup
20
- import textract
21
- from xml.etree import ElementTree as ET
 
22
  from urllib.parse import quote
23
- import time
24
- from collections import deque
25
 
26
- # Page config
27
  st.set_page_config(
28
- page_title="Bike Cinematic Universe 🎬",
29
- page_icon="🚲",
30
- layout="wide"
31
  )
32
 
33
- # Custom CSS with expanded styling
34
  st.markdown("""
35
  <style>
36
  .main {
@@ -70,29 +128,6 @@ st.markdown("""
70
  </style>
71
  """, unsafe_allow_html=True)
72
 
73
- # Load environment variables
74
- load_dotenv()
75
-
76
- # Initialize OpenAI client
77
- client = OpenAI(
78
- api_key=os.getenv('OPENAI_API_KEY'),
79
- organization=os.getenv('OPENAI_ORG_ID')
80
- )
81
-
82
- # Initialize session state
83
- if "openai_model" not in st.session_state:
84
- st.session_state["openai_model"] = "gpt-4o-2024-05-13"
85
- if "messages" not in st.session_state:
86
- st.session_state.messages = []
87
-
88
- # Hugging Face settings
89
- API_URL = os.getenv('API_URL')
90
- HF_KEY = os.getenv('HF_KEY')
91
- headers = {
92
- "Authorization": f"Bearer {HF_KEY}",
93
- "Content-Type": "application/json"
94
- }
95
-
96
  # Bike Collections
97
  bike_collections = {
98
  "Celestial Collection 🌌": {
@@ -112,21 +147,40 @@ bike_collections = {
112
  Lighting: Natural starlight with subtle rim lighting
113
  Color palette: Deep blues, silver highlights, cosmic purples""",
114
  "emoji": "✨"
115
  }
116
  },
117
  "Nature-Inspired Collection 🌲": {
118
  "Shadow Grasshopper": {
119
- "prompt": """A black bike jumping between forest paths.
120
- Dappled sunlight streams through the canopy, creating dynamic shadows.
 
121
  Camera angle: Through-the-trees tracking shot
122
  Lighting: Natural forest lighting with sun rays
123
  Color palette: Forest greens, golden sunlight, deep shadows""",
124
  "emoji": "πŸ¦—"
125
  }
126
  }
127
  }
128
 
129
- # File handling functions
130
  def generate_filename(prompt, file_type):
131
  """Generate a safe filename using the prompt and file type."""
132
  central = pytz.timezone('US/Central')
@@ -139,32 +193,24 @@ def create_and_save_file(content, file_type="md", prompt=None, is_image=False, s
139
  """Create and save file with proper handling of different types."""
140
  if not should_save:
141
  return None
142
-
143
  filename = generate_filename(prompt if prompt else content, file_type)
144
-
145
- if file_type == "md":
146
- title_from_content = extract_markdown_title(content)
147
- if title_from_content:
148
- filename = generate_filename(title_from_content, file_type)
149
-
150
  with open(filename, "w", encoding="utf-8") as f:
151
  if is_image:
152
  f.write(content)
153
  else:
154
- f.write(prompt + "\n\n" + content)
155
-
156
  return filename
157
 
158
- def extract_markdown_title(content):
159
- """Extract the first markdown title from content."""
160
- title_match = re.search(r'^\s*#\s*(.+)', content, re.MULTILINE)
161
- if title_match:
162
- return title_match.group(1).strip()
163
- return None
164
 
165
- # HTML5 Speech Synthesis
166
  @st.cache_resource
167
  def SpeechSynthesis(result):
 
168
  documentHTML5 = f'''
169
  <!DOCTYPE html>
170
  <html>
@@ -186,32 +232,9 @@ def SpeechSynthesis(result):
186
  </body>
187
  </html>
188
  '''
189
- st.components.v1.html(documentHTML5, width=1280, height=300)
190
-
191
- # Process functions for different media types
192
- def process_text(text_input):
193
- """Process text input with GPT-4o."""
194
- if text_input:
195
- st.session_state.messages.append({"role": "user", "content": text_input})
196
-
197
- with st.chat_message("user"):
198
- st.markdown(text_input)
199
-
200
- with st.chat_message("assistant"):
201
- completion = client.chat.completions.create(
202
- model=st.session_state["openai_model"],
203
- messages=[
204
- {"role": m["role"], "content": m["content"]}
205
- for m in st.session_state.messages
206
- ],
207
- stream=False
208
- )
209
- return_text = completion.choices[0].message.content
210
- st.write("Assistant: " + return_text)
211
-
212
- create_and_save_file(return_text, file_type="md", prompt=text_input)
213
- st.session_state.messages.append({"role": "assistant", "content": return_text})
214
 
 
215
  def process_image(image_input, user_prompt):
216
  """Process image with GPT-4o vision."""
217
  if isinstance(image_input, str):
@@ -220,7 +243,7 @@ def process_image(image_input, user_prompt):
220
 
221
  base64_image = base64.b64encode(image_input).decode("utf-8")
222
 
223
- response = client.chat.completions.create(
224
  model=st.session_state["openai_model"],
225
  messages=[
226
  {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
@@ -237,12 +260,12 @@ def process_image(image_input, user_prompt):
237
  return response.choices[0].message.content
238
 
239
  def process_audio(audio_input, text_input=''):
240
- """Process audio with GPT-4o and Whisper."""
241
  if isinstance(audio_input, str):
242
  with open(audio_input, "rb") as file:
243
  audio_input = file.read()
244
 
245
- transcription = client.audio.transcriptions.create(
246
  model="whisper-1",
247
  file=audio_input,
248
  )
@@ -254,7 +277,7 @@ def process_audio(audio_input, text_input=''):
254
  SpeechSynthesis(transcription.text)
255
 
256
  filename = generate_filename(transcription.text, "wav")
257
- create_and_save_file(audio_input.getvalue(), "wav", transcription.text, True)
258
 
259
  def process_video(video_path, seconds_per_frame=1):
260
  """Process video files for frame extraction and audio."""
@@ -287,6 +310,117 @@ def process_video(video_path, seconds_per_frame=1):
287
 
288
  return base64Frames, audio_path
289
 
290
  def create_media_gallery():
291
  """Create the media gallery interface."""
292
  st.header("🎬 Media Gallery")
@@ -296,37 +430,39 @@ def create_media_gallery():
296
  with tabs[0]:
297
  image_files = glob.glob("*.png") + glob.glob("*.jpg")
298
  if image_files:
299
- cols = st.columns(3)
 
300
  for idx, image_file in enumerate(image_files):
301
- with cols[idx % 3]:
302
- st.image(image_file)
303
- st.caption(os.path.basename(image_file))
304
 
305
- # Add prompt input for GPT-4o analysis
306
- prompt = st.text_input(f"Analyze image {idx}",
307
- "Describe this image in detail and list key elements.")
308
- if st.button(f"Analyze {idx}"):
309
- analysis = process_image(image_file, prompt)
310
  st.markdown(analysis)
311
 
312
  with tabs[1]:
313
  audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
314
  for audio_file in audio_files:
315
  with st.expander(f"🎡 {os.path.basename(audio_file)}"):
316
- st.audio(audio_file)
317
- if st.button(f"Transcribe {audio_file}"):
318
- process_audio(audio_file)
319
 
320
  with tabs[2]:
321
  video_files = glob.glob("*.mp4")
322
  for video_file in video_files:
323
  with st.expander(f"πŸŽ₯ {os.path.basename(video_file)}"):
324
- st.video(video_file)
325
- if st.button(f"Analyze {video_file}"):
326
- frames, audio = process_video(video_file)
327
- if audio:
328
- st.audio(audio)
329
-
330
  with tabs[3]:
331
  for collection_name, bikes in bike_collections.items():
332
  st.subheader(collection_name)
@@ -340,20 +476,115 @@ def create_media_gallery():
340
  <p>{details['prompt']}</p>
341
  </div>
342
  """, unsafe_allow_html=True)
343
 
344
  def main():
345
- st.title("🚲 Bike Cinematic Universe")
346
 
347
  # Main navigation
348
  tab_main = st.radio("Choose Action:",
349
- ["πŸ“Έ Upload Media", "🎬 View Gallery", "🎨 Generate Scene", "πŸ€– Chat"],
350
  horizontal=True)
351
 
352
- if tab_main == "πŸ“Έ Upload Media":
353
- col1, col2 = st.columns(2)
354
 
355
- with col1:
356
- uploaded_image = st.file_uploader("Upload Image", type=['png', 'jpg'])
357
- if uploaded_image:
358
- st.image(uploaded_image)
359
- prompt = st
1
  import streamlit as st
2
+ import anthropic
3
+ import openai
4
  import base64
5
  from datetime import datetime
6
  import plotly.graph_objects as go
7
  import cv2
8
+ import glob
9
+ import json
10
+ import math
11
  import os
12
  import pytz
13
  import random
14
  import re
15
  import requests
16
+ import streamlit.components.v1 as components
17
+ import textract
18
+ import time
19
+ import zipfile
20
  from audio_recorder_streamlit import audio_recorder
21
+ from bs4 import BeautifulSoup
22
+ from collections import deque
23
  from dotenv import load_dotenv
24
+ from gradio_client import Client, handle_file
25
  from huggingface_hub import InferenceClient
26
+ from io import BytesIO
27
+ from moviepy.editor import VideoFileClip
28
+ from PIL import Image
29
+ from PyPDF2 import PdfReader
30
  from urllib.parse import quote
31
+ from xml.etree import ElementTree as ET
32
+ from openai import OpenAI
33
+
34
+ # 1. Configuration and Setup
35
+ Site_Name = '🤖🧠Claude35📝🔬'
36
+ title = "🤖🧠Claude35📝🔬"
37
+ helpURL = 'https://huggingface.co/awacke1'
38
+ bugURL = 'https://huggingface.co/spaces/awacke1'
39
+ icons = '🤖🧠🔬📝'
40
 
 
41
  st.set_page_config(
42
+ page_title=title,
43
+ page_icon=icons,
44
+ layout="wide",
45
+ initial_sidebar_state="auto",
46
+ menu_items={
47
+ 'Get Help': helpURL,
48
+ 'Report a bug': bugURL,
49
+ 'About': title
50
+ }
51
+ )
52
+
53
+ # 2. Load environment variables and initialize clients
54
+ load_dotenv()
55
+
56
+ # OpenAI setup
57
+ openai.api_key = os.getenv('OPENAI_API_KEY')
58
+ if openai.api_key == None:
59
+ openai.api_key = st.secrets['OPENAI_API_KEY']
60
+
61
+ openai_client = OpenAI(
62
+ api_key=os.getenv('OPENAI_API_KEY'),
63
+ organization=os.getenv('OPENAI_ORG_ID')
64
  )
65
 
66
+ # Claude setup
67
+ anthropic_key = os.getenv("ANTHROPIC_API_KEY")
68
+ if anthropic_key == None:
69
+ anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
70
+ claude_client = anthropic.Anthropic(api_key=anthropic_key)
71
+
72
+ # HuggingFace setup
73
+ API_URL = os.getenv('API_URL')
74
+ HF_KEY = os.getenv('HF_KEY')
75
+ MODEL1 = "meta-llama/Llama-2-7b-chat-hf"
76
+ MODEL2 = "openai/whisper-small.en"
77
+
78
+ headers = {
79
+ "Authorization": f"Bearer {HF_KEY}",
80
+ "Content-Type": "application/json"
81
+ }
82
+
83
+ # Initialize session states
84
+ if "chat_history" not in st.session_state:
85
+ st.session_state.chat_history = []
86
+ if "openai_model" not in st.session_state:
87
+ st.session_state["openai_model"] = "gpt-4o-2024-05-13"
88
+ if "messages" not in st.session_state:
89
+ st.session_state.messages = []
90
+
91
+ # Custom CSS
92
  st.markdown("""
93
  <style>
94
  .main {
 
128
  </style>
129
  """, unsafe_allow_html=True)
130
 
131
  # Bike Collections
132
  bike_collections = {
133
  "Celestial Collection 🌌": {
 
147
  Lighting: Natural starlight with subtle rim lighting
148
  Color palette: Deep blues, silver highlights, cosmic purples""",
149
  "emoji": "✨"
150
+ },
151
+ "Moonlit Hopper": {
152
+ "prompt": """A sleek black bike mid-hop over a moonlit meadow,
153
+ the full moon illuminating the misty surroundings. Fireflies dance around the bike,
154
+ and soft shadows create a serene yet dynamic atmosphere.
155
+ Camera angle: Side profile with slight low angle
156
+ Lighting: Soft moonlight with atmospheric fog
157
+ Color palette: Silver blues, soft whites, deep shadows""",
158
+ "emoji": "πŸŒ™"
159
  }
160
  },
161
  "Nature-Inspired Collection 🌲": {
162
  "Shadow Grasshopper": {
163
+ "prompt": """A black bike jumping between forest paths,
164
+ with dappled sunlight streaming through the canopy. Shadows dance on the bike's frame
165
+ as it soars above mossy logs.
166
  Camera angle: Through-the-trees tracking shot
167
  Lighting: Natural forest lighting with sun rays
168
  Color palette: Forest greens, golden sunlight, deep shadows""",
169
  "emoji": "πŸ¦—"
170
+ },
171
+ "Onyx Leapfrog": {
172
+ "prompt": """A bike with obsidian-black finish jumping over a sparkling creek,
173
+ the reflection on the water broken into ripples by the leap. The surrounding forest
174
+ is vibrant with greens and browns.
175
+ Camera angle: Low angle from water level
176
+ Lighting: Golden hour side lighting
177
+ Color palette: Deep blacks, water blues, forest greens""",
178
+ "emoji": "🐸"
179
  }
180
  }
181
  }
182
 
183
+ # Helper Functions
184
  def generate_filename(prompt, file_type):
185
  """Generate a safe filename using the prompt and file type."""
186
  central = pytz.timezone('US/Central')
 
193
  """Create and save file with proper handling of different types."""
194
  if not should_save:
195
  return None
 
196
  filename = generate_filename(prompt if prompt else content, file_type)
 
197
  with open(filename, "w", encoding="utf-8") as f:
198
  if is_image:
199
  f.write(content)
200
  else:
201
+ f.write(prompt + "\n\n" + content if prompt else content)
 
202
  return filename
203
 
204
+ def get_download_link(file_path):
205
+ """Create download link for file."""
206
+ with open(file_path, "rb") as file:
207
+ contents = file.read()
208
+ b64 = base64.b64encode(contents).decode()
209
+ return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}πŸ“‚</a>'
210
 
 
211
  @st.cache_resource
212
  def SpeechSynthesis(result):
213
+ """HTML5 Speech Synthesis."""
214
  documentHTML5 = f'''
215
  <!DOCTYPE html>
216
  <html>
 
232
  </body>
233
  </html>
234
  '''
235
+ components.html(documentHTML5, width=1280, height=300)
236
 
237
+ # Media Processing Functions
238
  def process_image(image_input, user_prompt):
239
  """Process image with GPT-4o vision."""
240
  if isinstance(image_input, str):
 
243
 
244
  base64_image = base64.b64encode(image_input).decode("utf-8")
245
 
246
+ response = openai_client.chat.completions.create(
247
  model=st.session_state["openai_model"],
248
  messages=[
249
  {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
 
260
  return response.choices[0].message.content
261
 
262
  def process_audio(audio_input, text_input=''):
263
+ """Process audio with Whisper and GPT."""
264
  if isinstance(audio_input, str):
265
  with open(audio_input, "rb") as file:
266
  audio_input = file.read()
267
 
268
+ transcription = openai_client.audio.transcriptions.create(
269
  model="whisper-1",
270
  file=audio_input,
271
  )
 
277
  SpeechSynthesis(transcription.text)
278
 
279
  filename = generate_filename(transcription.text, "wav")
280
+ create_and_save_file(audio_input, "wav", transcription.text, True)
281
 
282
  def process_video(video_path, seconds_per_frame=1):
283
  """Process video files for frame extraction and audio."""
 
310
 
311
  return base64Frames, audio_path
312
 
313
+ def process_video_with_gpt(video_input, user_prompt):
314
+ """Process video with GPT-4o vision."""
315
+ base64Frames, audio_path = process_video(video_input)
316
+
317
+ response = openai_client.chat.completions.create(
318
+ model=st.session_state["openai_model"],
319
+ messages=[
320
+ {"role": "system", "content": "Analyze the video frames and provide a detailed description."},
321
+ {"role": "user", "content": [
322
+ {"type": "text", "text": user_prompt},
323
+ *[{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{frame}"}}
324
+ for frame in base64Frames]
325
+ ]}
326
+ ]
327
+ )
328
+
329
+ return response.choices[0].message.content
330
+
331
+ # ArXiv Search Functions
332
+ def search_arxiv(query):
333
+ """Search ArXiv papers using Hugging Face client."""
334
+ client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
335
+ response = client.predict(
336
+ query,
337
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
338
+ True,
339
+ api_name="/ask_llm"
340
+ )
341
+ return response
342
+
343
+ # Chat Processing Functions
344
+ def process_with_gpt(text_input):
345
+ """Process text with GPT-4o."""
346
+ if text_input:
347
+ st.session_state.messages.append({"role": "user", "content": text_input})
348
+
349
+ with st.chat_message("user"):
350
+ st.markdown(text_input)
351
+
352
+ with st.chat_message("assistant"):
353
+ completion = openai_client.chat.completions.create(
354
+ model=st.session_state["openai_model"],
355
+ messages=[
356
+ {"role": m["role"], "content": m["content"]}
357
+ for m in st.session_state.messages
358
+ ],
359
+ stream=False
360
+ )
361
+ return_text = completion.choices[0].message.content
362
+ st.write("GPT-4o: " + return_text)
363
+
364
+ filename = generate_filename(text_input, "md")
365
+ create_and_save_file(return_text, file_type="md", prompt=text_input)
366
+ st.session_state.messages.append({"role": "assistant", "content": return_text})
367
+ return return_text
368
+
369
+ def process_with_claude(text_input):
370
+ """Process text with Claude."""
371
+ if text_input:
372
+ response = claude_client.messages.create(
373
+ model="claude-3-sonnet-20240229",
374
+ max_tokens=1000,
375
+ messages=[
376
+ {"role": "user", "content": text_input}
377
+ ]
378
+ )
379
+ response_text = response.content[0].text
380
+ st.write("st.write("Claude: " + response_text)
381
+
382
+ filename = generate_filename(text_input, "md")
383
+ create_file(filename, text_input, response_text)
384
+
385
+ st.session_state.chat_history.append({
386
+ "user": text_input,
387
+ "claude": response_text
388
+ })
389
+ return response_text
390
+
391
+ # File Management Functions
392
+ def load_file(file_name):
393
+ """Load file content."""
394
+ with open(file_name, "r", encoding='utf-8') as file:
395
+ content = file.read()
396
+ return content
397
+
398
+ def create_zip_of_files(files):
399
+ """Create zip archive of files."""
400
+ zip_name = "all_files.zip"
401
+ with zipfile.ZipFile(zip_name, 'w') as zipf:
402
+ for file in files:
403
+ zipf.write(file)
404
+ return zip_name
405
+
406
+ def get_media_html(media_path, media_type="video", width="100%"):
407
+ """Generate HTML for media player."""
408
+ media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
409
+ if media_type == "video":
410
+ return f'''
411
+ <video width="{width}" controls autoplay muted loop>
412
+ <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
413
+ Your browser does not support the video tag.
414
+ </video>
415
+ '''
416
+ else: # audio
417
+ return f'''
418
+ <audio controls style="width: {width};">
419
+ <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
420
+ Your browser does not support the audio element.
421
+ </audio>
422
+ '''
423
+
424
  def create_media_gallery():
425
  """Create the media gallery interface."""
426
  st.header("🎬 Media Gallery")
 
430
  with tabs[0]:
431
  image_files = glob.glob("*.png") + glob.glob("*.jpg")
432
  if image_files:
433
+ num_cols = st.slider("Number of columns", 1, 5, 3)
434
+ cols = st.columns(num_cols)
435
  for idx, image_file in enumerate(image_files):
436
+ with cols[idx % num_cols]:
437
+ img = Image.open(image_file)
438
+ st.image(img, use_column_width=True)
439
 
440
+ # Add GPT vision analysis option
441
+ if st.button(f"Analyze {os.path.basename(image_file)}"):
442
+ analysis = process_image(image_file,
443
+ "Describe this image in detail and identify key elements.")
 
444
  st.markdown(analysis)
445
 
446
  with tabs[1]:
447
  audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
448
  for audio_file in audio_files:
449
  with st.expander(f"🎡 {os.path.basename(audio_file)}"):
450
+ st.markdown(get_media_html(audio_file, "audio"), unsafe_allow_html=True)
451
+ if st.button(f"Transcribe {os.path.basename(audio_file)}"):
452
+ with open(audio_file, "rb") as f:
453
+ transcription = process_audio(f)
454
+ st.write(transcription)
455
 
456
  with tabs[2]:
457
  video_files = glob.glob("*.mp4")
458
  for video_file in video_files:
459
  with st.expander(f"πŸŽ₯ {os.path.basename(video_file)}"):
460
+ st.markdown(get_media_html(video_file, "video"), unsafe_allow_html=True)
461
+ if st.button(f"Analyze {os.path.basename(video_file)}"):
462
+ analysis = process_video_with_gpt(video_file,
463
+ "Describe what's happening in this video.")
464
+ st.markdown(analysis)
465
+
466
  with tabs[3]:
467
  for collection_name, bikes in bike_collections.items():
468
  st.subheader(collection_name)
 
476
  <p>{details['prompt']}</p>
477
  </div>
478
  """, unsafe_allow_html=True)
479
+
480
+ if st.button(f"Generate {bike_name} Scene"):
481
+ prompt = details['prompt']
482
+ # Here you could integrate with image generation API
483
+ st.write(f"Generated scene description for {bike_name}:")
484
+ st.write(prompt)
485
+
486
+ def display_file_manager():
487
+ """Display file management sidebar."""
488
+ st.sidebar.title("πŸ“ File Management")
489
+
490
+ all_files = glob.glob("*.md")
491
+ all_files.sort(reverse=True)
492
+
493
+ if st.sidebar.button("πŸ—‘ Delete All"):
494
+ for file in all_files:
495
+ os.remove(file)
496
+ st.rerun()
497
+
498
+ if st.sidebar.button("⬇️ Download All"):
499
+ zip_file = create_zip_of_files(all_files)
500
+ st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)
501
+
502
+ for file in all_files:
503
+ col1, col2, col3, col4 = st.sidebar.columns([1,3,1,1])
504
+ with col1:
505
+ if st.button("🌐", key="view_"+file):
506
+ st.session_state.current_file = file
507
+ st.session_state.file_content = load_file(file)
508
+ with col2:
509
+ st.markdown(get_download_link(file), unsafe_allow_html=True)
510
+ with col3:
511
+ if st.button("πŸ“‚", key="edit_"+file):
512
+ st.session_state.current_file = file
513
+ st.session_state.file_content = load_file(file)
514
+ with col4:
515
+ if st.button("πŸ—‘", key="delete_"+file):
516
+ os.remove(file)
517
+ st.rerun()
518
 
519
  def main():
520
+ st.title("🚲 Bike Cinematic Universe & AI Assistant")
521
 
522
  # Main navigation
523
  tab_main = st.radio("Choose Action:",
524
+ ["πŸ’¬ Chat", "πŸ“Έ Media Gallery", "πŸ” Search ArXiv", "πŸ“ File Editor"],
525
  horizontal=True)
526
 
527
+ if tab_main == "πŸ’¬ Chat":
528
+ # Model Selection
529
+ model_choice = st.sidebar.radio(
530
+ "Choose AI Model:",
531
+ ["GPT-4o", "Claude-3", "Both"]
532
+ )
533
 
534
+ # Chat Interface
535
+ user_input = st.text_area("Message:", height=100)
536
+
537
+ if st.button("Send πŸ“¨"):
538
+ if user_input:
539
+ if model_choice == "GPT-4o":
540
+ gpt_response = process_with_gpt(user_input)
541
+ elif model_choice == "Claude-3":
542
+ claude_response = process_with_claude(user_input)
543
+ else: # Both
544
+ col1, col2 = st.columns(2)
545
+ with col1:
546
+ st.subheader("GPT-4o Response")
547
+ gpt_response = process_with_gpt(user_input)
548
+ with col2:
549
+ st.subheader("Claude-3 Response")
550
+ claude_response = process_with_claude(user_input)
551
+
552
+ # Display Chat History
553
+ st.subheader("Chat History πŸ“œ")
554
+ tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])
555
+
556
+ with tab1:
557
+ for chat in st.session_state.chat_history:
558
+ st.text_area("You:", chat["user"], height=100, disabled=True)
559
+ st.text_area("Claude:", chat["claude"], height=200, disabled=True)
560
+ st.markdown("---")
561
+
562
+ with tab2:
563
+ for message in st.session_state.messages:
564
+ with st.chat_message(message["role"]):
565
+ st.markdown(message["content"])
566
+
567
+ elif tab_main == "πŸ“Έ Media Gallery":
568
+ create_media_gallery()
569
+
570
+ elif tab_main == "πŸ” Search ArXiv":
571
+ query = st.text_input("Enter your research query:")
572
+ if query:
573
+ with st.spinner("Searching ArXiv..."):
574
+ results = search_arxiv(query)
575
+ st.markdown(results)
576
+
577
+ elif tab_main == "πŸ“ File Editor":
578
+ if hasattr(st.session_state, 'current_file'):
579
+ st.subheader(f"Editing: {st.session_state.current_file}")
580
+ new_content = st.text_area("Content:", st.session_state.file_content, height=300)
581
+ if st.button("Save Changes"):
582
+ with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
583
+ file.write(new_content)
584
+ st.success("File updated successfully!")
585
+
586
+ # Always show file manager in sidebar
587
+ display_file_manager()
588
+
589
+ if __name__ == "__main__":
590
+ main()
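
For reviewers who want to sanity-check the new Claude path outside Streamlit, the call shape added in this commit reduces to the short standalone sketch below. The model name, client setup, and response handling are taken from process_with_claude above; the prompt string is a placeholder, and the sketch assumes ANTHROPIC_API_KEY is set in the environment and the anthropic package is installed.

# Minimal sketch of the Claude call used by process_with_claude (assumptions noted above)
import os
import anthropic

claude_client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
response = claude_client.messages.create(
    model="claude-3-sonnet-20240229",  # model name as it appears in the diff
    max_tokens=1000,
    messages=[{"role": "user", "content": "Describe a cinematic night-ride bike scene."}],  # placeholder prompt
)
print(response.content[0].text)  # same response handling as the app, minus st.write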