awacke1 commited on
Commit
d3455e3
β€’
1 Parent(s): 7ff0c20

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +486 -0
app.py ADDED
@@ -0,0 +1,486 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import anthropic
3
+ import openai
4
+ import base64
5
+ from datetime import datetime
6
+ import plotly.graph_objects as go
7
+ import cv2
8
+ import glob
9
+ import json
10
+ import math
11
+ import os
12
+ import pytz
13
+ import random
14
+ import re
15
+ import requests
16
+ import streamlit.components.v1 as components
17
+ import textract
18
+ import time
19
+ import zipfile
20
+ from audio_recorder_streamlit import audio_recorder
21
+ from bs4 import BeautifulSoup
22
+ from collections import deque
23
+ from dotenv import load_dotenv
24
+ from gradio_client import Client, handle_file
25
+ from huggingface_hub import InferenceClient
26
+ from io import BytesIO
27
+ from moviepy.editor import VideoFileClip
28
+ from PIL import Image
29
+ from PyPDF2 import PdfReader
30
+ from urllib.parse import quote
31
+ from xml.etree import ElementTree as ET
32
+ from openai import OpenAI
33
+
34
# 1. Configuration and Setup
# Branding constants consumed by st.set_page_config below; nothing else in
# this file reads them (Site_Name is currently unused — kept for parity).
Site_Name = '🤖🧠Claude35📝🔬'
title = "🤖🧠Claude35📝🔬"
helpURL = 'https://huggingface.co/awacke1'
bugURL = 'https://huggingface.co/spaces/awacke1'
icons = '🤖🧠🔬📝'

# Must be the first Streamlit call in the script run.
st.set_page_config(
    page_title=title,
    page_icon=icons,
    layout="wide",
    initial_sidebar_state="auto",
    menu_items={
        'Get Help': helpURL,
        'Report a bug': bugURL,
        'About': title
    }
)

# Custom CSS
# Injected once per run; styles the dark gradient theme and the hover cards
# rendered by create_media_gallery (classes: category-header, scene-card,
# media-gallery, bike-card).
st.markdown("""
    <style>
    .main {
        background: linear-gradient(to right, #1a1a1a, #2d2d2d);
        color: #ffffff;
    }
    .stMarkdown {
        font-family: 'Helvetica Neue', sans-serif;
    }
    .category-header {
        background: linear-gradient(45deg, #2b5876, #4e4376);
        padding: 20px;
        border-radius: 10px;
        margin: 10px 0;
    }
    .scene-card {
        background: rgba(0,0,0,0.3);
        padding: 15px;
        border-radius: 8px;
        margin: 10px 0;
        border: 1px solid rgba(255,255,255,0.1);
    }
    .media-gallery {
        display: grid;
        gap: 1rem;
        padding: 1rem;
    }
    .bike-card {
        background: rgba(255,255,255,0.05);
        border-radius: 10px;
        padding: 15px;
        transition: transform 0.3s;
    }
    .bike-card:hover {
        transform: scale(1.02);
    }
    </style>
""", unsafe_allow_html=True)
92
+
93
# 2. Load environment variables and initialize clients
load_dotenv()

# OpenAI setup — prefer the environment variable, fall back to Streamlit
# secrets when it is absent (raises KeyError if neither is configured).
openai.api_key = os.getenv('OPENAI_API_KEY')
if openai.api_key is None:  # identity check, not '== None'
    openai.api_key = st.secrets['OPENAI_API_KEY']

openai_client = OpenAI(
    # Use the resolved key so the secrets fallback actually reaches the
    # client (the original re-read os.getenv and could pass None here).
    api_key=openai.api_key,
    organization=os.getenv('OPENAI_ORG_ID')
)

# Claude setup — same env-then-secrets fallback pattern as OpenAI above.
anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
if anthropic_key is None:
    anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
claude_client = anthropic.Anthropic(api_key=anthropic_key)

# Initialize session states (survive Streamlit reruns).
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []   # Claude conversation turns
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-4o-2024-05-13"
if "messages" not in st.session_state:
    st.session_state.messages = []       # GPT chat messages
119
+
120
# Bike Collections
# Static catalog backing the "Scene Generator" tab in create_media_gallery:
# {collection name: {bike name: {"prompt": image-generation prompt,
#                                "emoji": display icon}}}.
bike_collections = {
    "Celestial Collection 🌌": {
        "Eclipse Vaulter": {
            "prompt": """Cinematic shot of a sleek black mountain bike silhouetted against a total solar eclipse.
            The corona creates an ethereal halo effect, with lens flares accentuating key points of the frame.
            Dynamic composition shows the bike mid-leap, with stardust particles trailing behind.
            Camera angle: Low angle, wide shot
            Lighting: Dramatic rim lighting from eclipse
            Color palette: Deep purples, cosmic blues, corona gold""",
            "emoji": "🌑"
        },
        "Starlight Leaper": {
            "prompt": """A black bike performing an epic leap under a vast Milky Way galaxy.
            Shimmering stars blanket the sky while the bike's wheels leave a trail of stardust.
            Camera angle: Wide-angle upward shot
            Lighting: Natural starlight with subtle rim lighting
            Color palette: Deep blues, silver highlights, cosmic purples""",
            "emoji": "✨"
        },
        "Moonlit Hopper": {
            "prompt": """A sleek black bike mid-hop over a moonlit meadow,
            the full moon illuminating the misty surroundings. Fireflies dance around the bike,
            and soft shadows create a serene yet dynamic atmosphere.
            Camera angle: Side profile with slight low angle
            Lighting: Soft moonlight with atmospheric fog
            Color palette: Silver blues, soft whites, deep shadows""",
            "emoji": "🌙"
        }
    },
    "Nature-Inspired Collection 🌲": {
        "Shadow Grasshopper": {
            "prompt": """A black bike jumping between forest paths,
            with dappled sunlight streaming through the canopy. Shadows dance on the bike's frame
            as it soars above mossy logs.
            Camera angle: Through-the-trees tracking shot
            Lighting: Natural forest lighting with sun rays
            Color palette: Forest greens, golden sunlight, deep shadows""",
            "emoji": "🦗"
        },
        "Onyx Leapfrog": {
            "prompt": """A bike with obsidian-black finish jumping over a sparkling creek,
            the reflection on the water broken into ripples by the leap. The surrounding forest
            is vibrant with greens and browns.
            Camera angle: Low angle from water level
            Lighting: Golden hour side lighting
            Color palette: Deep blacks, water blues, forest greens""",
            "emoji": "🐸"
        }
    }
}
171
+
172
+ # Helper Functions
173
def generate_filename(prompt, file_type):
    """Return a filesystem-safe filename '<MMDD_HHMM>_<sanitized prompt>.<ext>'.

    The timestamp is taken in US/Central time. Characters illegal in
    filenames (and newlines) are replaced with spaces, runs of whitespace
    are collapsed, and the prompt part is capped at 240 characters.
    """
    # stdlib (3.9+) zoneinfo replaces the third-party pytz dependency;
    # 'US/Central' is a valid backward-compatibility key in the tz database.
    from zoneinfo import ZoneInfo
    central = ZoneInfo('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
    replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
    safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:240]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"
179
+
180
def create_file(filename, prompt, response, should_save=True):
    """Write a prompt/response pair to *filename* (UTF-8), separated by a
    blank line. No-op when *should_save* is False."""
    if should_save:
        with open(filename, 'w', encoding='utf-8') as fh:
            fh.write(f"{prompt}\n\n{response}")
185
+
186
def create_and_save_file(content, file_type="md", prompt=None, is_image=False, should_save=True):
    """Save *content* under a generated, timestamped filename.

    Args:
        content: text, or raw binary payload when is_image is True.
        file_type: filename extension (no dot).
        prompt: optional text prepended to *content* (text mode only) and
            used for the generated filename; falls back to *content*.
        is_image: treat *content* as binary (image/audio bytes).
        should_save: when False, nothing is written and None is returned.

    Returns:
        The filename written, or None when should_save is False.
    """
    if not should_save:
        return None
    filename = generate_filename(prompt if prompt else content, file_type)
    if is_image:
        # Binary payloads must go through 'wb' — the original text-mode
        # open raised TypeError when handed bytes.
        with open(filename, "wb") as f:
            f.write(content if isinstance(content, (bytes, bytearray)) else content.encode("utf-8"))
    else:
        with open(filename, "w", encoding="utf-8") as f:
            f.write(prompt + "\n\n" + content if prompt else content)
    return filename
196
+
197
def get_download_link(file_path):
    """Return an HTML anchor that downloads *file_path* inlined as a
    base64 data URI (works inside st.markdown with unsafe_allow_html)."""
    with open(file_path, "rb") as fh:
        encoded = base64.b64encode(fh.read()).decode()
    name = os.path.basename(file_path)
    return f'<a href="data:file/txt;base64,{encoded}" download="{name}">Download {name}📂</a>'
202
+
203
def load_file(file_name):
    """Read and return the entire text of *file_name* (UTF-8)."""
    with open(file_name, "r", encoding='utf-8') as fh:
        return fh.read()
207
+
208
def create_zip_of_files(files):
    """Bundle *files* into 'all_files.zip' in the CWD and return its name."""
    archive = "all_files.zip"
    with zipfile.ZipFile(archive, 'w') as zf:
        for path in files:
            zf.write(path)
    return archive
214
+
215
def get_media_html(media_path, media_type="video", width="100%"):
    """Return an HTML <video> or <audio> element with the file content
    embedded as a base64 data URI.

    Args:
        media_path: path to the media file on disk.
        media_type: "video" for an mp4 player; anything else yields audio.
        width: CSS width applied to the element.
    """
    # Use a context manager — the original open(...).read() leaked the handle.
    with open(media_path, 'rb') as media_file:
        media_data = base64.b64encode(media_file.read()).decode()
    if media_type == "video":
        return f'''
        <video width="{width}" controls autoplay muted loop>
            <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
            Your browser does not support the video tag.
        </video>
        '''
    else:  # audio
        return f'''
        <audio controls style="width: {width};">
            <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
            Your browser does not support the audio element.
        </audio>
        '''
231
+
232
+ # Media Processing Functions
233
def process_image(image_input, user_prompt):
    """Send an image (file path or raw bytes) plus *user_prompt* to the
    selected OpenAI vision model and return its Markdown answer."""
    # Normalize a path argument into raw bytes.
    if isinstance(image_input, str):
        with open(image_input, "rb") as image_file:
            image_input = image_file.read()

    encoded = base64.b64encode(image_input).decode("utf-8")
    user_content = [
        {"type": "text", "text": user_prompt},
        {"type": "image_url", "image_url": {
            "url": f"data:image/png;base64,{encoded}"
        }},
    ]

    completion = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
            {"role": "user", "content": user_content},
        ],
        temperature=0.0,
    )
    return completion.choices[0].message.content
255
+
256
def process_audio(audio_input, text_input=''):
    """Transcribe audio with Whisper, log it to the chat state, persist the
    raw audio, and return the transcription text.

    Args:
        audio_input: file path, raw bytes, or an open binary file object.
        text_input: unused; kept for interface compatibility.

    Returns:
        The transcribed text (the gallery caller passes it to st.write —
        the original returned None there).
    """
    if isinstance(audio_input, str):
        with open(audio_input, "rb") as file:
            audio_input = file.read()

    # The transcriptions endpoint needs a named file-like object; raw bytes
    # are rejected, so wrap them (file objects pass through unchanged).
    if isinstance(audio_input, (bytes, bytearray)):
        audio_file = BytesIO(audio_input)
        audio_file.name = "audio.wav"
    else:
        audio_file = audio_input

    transcription = openai_client.audio.transcriptions.create(
        model="whisper-1",
        file=audio_file,
    )

    st.session_state.messages.append({"role": "user", "content": transcription.text})

    with st.chat_message("assistant"):
        st.markdown(transcription.text)

    # Persist the raw audio; explicit keywords (the original passed True
    # positionally into the is_image slot — binary mode is what is wanted).
    create_and_save_file(audio_input, "wav", transcription.text,
                         is_image=True, should_save=True)
    return transcription.text
273
+
274
def process_video(video_path, seconds_per_frame=1):
    """Sample one frame every *seconds_per_frame* seconds from a video and
    extract its audio track.

    Returns:
        (base64Frames, audio_path): a list of base64-encoded JPEG frames and
        the path to the extracted mp3, or None when no audio track exists.
    """
    base64Frames = []
    video = cv2.VideoCapture(video_path)
    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = video.get(cv2.CAP_PROP_FPS)
    # Clamp to >= 1: fps * seconds_per_frame can truncate to 0 (e.g. low-fps
    # clips), and range() raises ValueError on a zero step.
    frames_to_skip = max(1, int(fps * seconds_per_frame))

    for frame_idx in range(0, total_frames, frames_to_skip):
        video.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
        success, frame = video.read()
        if not success:
            break
        _, buffer = cv2.imencode(".jpg", frame)
        base64Frames.append(base64.b64encode(buffer).decode("utf-8"))

    video.release()

    # Extract audio (moviepy raises when the clip has no audio track).
    base_video_path = os.path.splitext(video_path)[0]
    audio_path = f"{base_video_path}.mp3"
    try:
        video_clip = VideoFileClip(video_path)
        video_clip.audio.write_audiofile(audio_path)
        video_clip.close()
    except Exception:  # narrow from bare except: don't swallow KeyboardInterrupt
        st.warning("No audio track found in video")
        audio_path = None

    return base64Frames, audio_path
303
+
304
def process_video_with_gpt(video_input, user_prompt):
    """Sample frames from *video_input* and ask the selected OpenAI model to
    analyze them alongside *user_prompt*; returns the model's reply."""
    frames, _audio_path = process_video(video_input)

    frame_parts = [
        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{frame}"}}
        for frame in frames
    ]
    completion = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "Analyze the video frames and provide a detailed description."},
            {"role": "user", "content": [{"type": "text", "text": user_prompt}, *frame_parts]},
        ]
    )
    return completion.choices[0].message.content
320
+
321
def create_media_gallery():
    """Render the four-tab media gallery: images, audio, video, and the
    bike scene-prompt generator backed by bike_collections.

    Scans the current working directory for media files on every rerun.
    """
    st.header("🎬 Media Gallery")

    tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video", "🎨 Scene Generator"])

    with tabs[0]:
        # Image grid with a user-adjustable column count.
        image_files = glob.glob("*.png") + glob.glob("*.jpg")
        if image_files:
            num_cols = st.slider("Number of columns", 1, 5, 3)
            cols = st.columns(num_cols)
            for idx, image_file in enumerate(image_files):
                with cols[idx % num_cols]:
                    img = Image.open(image_file)
                    st.image(img, use_container_width=True)

                    # Add GPT vision analysis option
                    if st.button(f"Analyze {os.path.basename(image_file)}"):
                        analysis = process_image(image_file,
                            "Describe this image in detail and identify key elements.")
                        st.markdown(analysis)

    with tabs[1]:
        # Inline audio players with on-demand Whisper transcription.
        audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
        for audio_file in audio_files:
            with st.expander(f"🎵 {os.path.basename(audio_file)}"):
                st.markdown(get_media_html(audio_file, "audio"), unsafe_allow_html=True)
                if st.button(f"Transcribe {os.path.basename(audio_file)}"):
                    with open(audio_file, "rb") as f:
                        transcription = process_audio(f)
                        st.write(transcription)

    with tabs[2]:
        # Inline video players with on-demand GPT frame analysis.
        video_files = glob.glob("*.mp4")
        for video_file in video_files:
            with st.expander(f"🎥 {os.path.basename(video_file)}"):
                st.markdown(get_media_html(video_file, "video"), unsafe_allow_html=True)
                if st.button(f"Analyze {os.path.basename(video_file)}"):
                    analysis = process_video_with_gpt(video_file,
                        "Describe what's happening in this video.")
                    st.markdown(analysis)

    with tabs[3]:
        # One card per bike, one column per bike within each collection.
        for collection_name, bikes in bike_collections.items():
            st.subheader(collection_name)
            cols = st.columns(len(bikes))

            for idx, (bike_name, details) in enumerate(bikes.items()):
                with cols[idx]:
                    st.markdown(f"""
                    <div class='bike-card'>
                        <h3>{details['emoji']} {bike_name}</h3>
                        <p>{details['prompt']}</p>
                    </div>
                    """, unsafe_allow_html=True)

                    if st.button(f"Generate {bike_name} Scene"):
                        prompt = details['prompt']
                        # Here you could integrate with image generation API
                        st.write(f"Generated scene description for {bike_name}:")
                        st.write(prompt)
381
+
382
def display_file_manager():
    """Display file management sidebar.

    Lists every *.md file in the CWD (newest first) with per-file
    view / download / edit / delete controls plus bulk delete/download.
    """
    st.sidebar.title("📁 File Management")

    md_files = sorted(glob.glob("*.md"), reverse=True)

    if st.sidebar.button("🗑 Delete All"):
        for path in md_files:
            os.remove(path)
        st.rerun()

    if st.sidebar.button("⬇️ Download All"):
        archive = create_zip_of_files(md_files)
        st.sidebar.markdown(get_download_link(archive), unsafe_allow_html=True)

    for path in md_files:
        view_col, link_col, edit_col, delete_col = st.sidebar.columns([1, 3, 1, 1])
        with view_col:
            # Viewing and editing both load the file into session state;
            # the File Editor tab in main() picks it up from there.
            if st.button("🌐", key="view_" + path):
                st.session_state.current_file = path
                st.session_state.file_content = load_file(path)
        with link_col:
            st.markdown(get_download_link(path), unsafe_allow_html=True)
        with edit_col:
            if st.button("📂", key="edit_" + path):
                st.session_state.current_file = path
                st.session_state.file_content = load_file(path)
        with delete_col:
            if st.button("🗑", key="delete_" + path):
                os.remove(path)
                st.rerun()
414
+
415
def main():
    """Top-level Streamlit page: radio-driven navigation between chat,
    media gallery, ArXiv search, and a simple file editor; the file
    manager sidebar is rendered on every run."""
    st.title("🚲 Bike Cinematic Universe & AI Assistant")

    # Main navigation
    tab_main = st.radio("Choose Action:",
        ["💬 Chat", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
        horizontal=True)

    if tab_main == "💬 Chat":
        # Model Selection
        model_choice = st.sidebar.radio(
            "Choose AI Model:",
            ["GPT-4o", "Claude-3", "Both"]
        )

        # Chat Interface
        user_input = st.text_area("Message:", height=100)

        if st.button("Send 📨"):
            if user_input:
                # NOTE(review): process_with_gpt and process_with_claude are
                # called here but are not defined anywhere in this file —
                # confirm they exist, otherwise this raises NameError.
                if model_choice == "GPT-4o":
                    gpt_response = process_with_gpt(user_input)
                elif model_choice == "Claude-3":
                    claude_response = process_with_claude(user_input)
                else:  # Both
                    col1, col2 = st.columns(2)
                    with col1:
                        st.subheader("GPT-4o Response")
                        gpt_response = process_with_gpt(user_input)
                    with col2:
                        st.subheader("Claude-3 Response")
                        claude_response = process_with_claude(user_input)

        # Display Chat History
        st.subheader("Chat History 📜")
        tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])

        with tab1:
            # chat_history entries are expected as {"user": ..., "claude": ...}
            for chat in st.session_state.chat_history:
                st.text_area("You:", chat["user"], height=100, disabled=True)
                st.text_area("Claude:", chat["claude"], height=200, disabled=True)
                st.markdown("---")

        with tab2:
            # messages entries follow the OpenAI chat format {"role", "content"}
            for message in st.session_state.messages:
                with st.chat_message(message["role"]):
                    st.markdown(message["content"])

    elif tab_main == "📸 Media Gallery":
        create_media_gallery()

    elif tab_main == "🔍 Search ArXiv":
        query = st.text_input("Enter your research query:")
        if query:
            with st.spinner("Searching ArXiv..."):
                # NOTE(review): search_arxiv is not defined in this file —
                # confirm it is provided elsewhere before shipping.
                results = search_arxiv(query)
                st.markdown(results)

    elif tab_main == "📝 File Editor":
        # current_file / file_content are set by display_file_manager's
        # view/edit buttons.
        if hasattr(st.session_state, 'current_file'):
            st.subheader(f"Editing: {st.session_state.current_file}")
            new_content = st.text_area("Content:", st.session_state.file_content, height=300)
            if st.button("Save Changes"):
                with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
                    file.write(new_content)
                st.success("File updated successfully!")

    # Always show file manager in sidebar
    display_file_manager()

if __name__ == "__main__":
    main()