awacke1 committed on
Commit 090e21e • 1 Parent(s): 27c9e39

Update app.py

Files changed (1)
  1. app.py +85 -1036
app.py CHANGED
@@ -3,33 +3,16 @@ import anthropic
 import openai
 import base64
 from datetime import datetime
- import plotly.graph_objects as go
- import cv2
- import glob
- import json
- import math
- import os
 import pytz
- import random
 import re
- import requests
- import streamlit.components.v1 as components
- import textract
 import time
 import zipfile
- from audio_recorder_streamlit import audio_recorder
- from bs4 import BeautifulSoup
- from collections import deque
- from dotenv import load_dotenv
- from gradio_client import Client, handle_file
- from huggingface_hub import InferenceClient
 from io import BytesIO
- from moviepy.editor import VideoFileClip
 from PIL import Image
- from PyPDF2 import PdfReader
- from urllib.parse import quote
- from xml.etree import ElementTree as ET
- from openai import OpenAI

 # 1. Configuration and Setup
 Site_Name = '🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI'
@@ -51,895 +34,89 @@ st.set_page_config(
 )

 # 2. Load environment variables and initialize clients
- load_dotenv()
-
- # OpenAI setup
 openai.api_key = os.getenv('OPENAI_API_KEY')
- if openai.api_key == None:
-     openai.api_key = st.secrets['OPENAI_API_KEY']
-
- openai_client = OpenAI(
-     api_key=os.getenv('OPENAI_API_KEY'),
-     organization=os.getenv('OPENAI_ORG_ID')
- )
-
- # 3. Claude setup
 anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
- if anthropic_key == None:
-     anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
- claude_client = anthropic.Anthropic(api_key=anthropic_key)

- # 4. Initialize session states
- if 'transcript_history' not in st.session_state:
-     st.session_state.transcript_history = []
- if "chat_history" not in st.session_state:
-     st.session_state.chat_history = []
- if "openai_model" not in st.session_state:
-     st.session_state["openai_model"] = "gpt-4o-2024-05-13"
- if "messages" not in st.session_state:
-     st.session_state.messages = []
- if 'last_voice_input' not in st.session_state:
-     st.session_state.last_voice_input = ""
-
- # 5. HuggingFace setup
- API_URL = os.getenv('API_URL')
- HF_KEY = os.getenv('HF_KEY')
- MODEL1 = "meta-llama/Llama-2-7b-chat-hf"
- MODEL2 = "openai/whisper-small.en"
-
- headers = {
-     "Authorization": f"Bearer {HF_KEY}",
-     "Content-Type": "application/json"
- }

 # Initialize session states
 if "chat_history" not in st.session_state:
     st.session_state.chat_history = []
- if "openai_model" not in st.session_state:
-     st.session_state["openai_model"] = "gpt-4o-2024-05-13"
 if "messages" not in st.session_state:
     st.session_state.messages = []
- # Custom CSS
- st.markdown("""
-     <style>
-     .main {
-         background: linear-gradient(to right, #1a1a1a, #2d2d2d);
-         color: #ffffff;
-     }
-     .stMarkdown {
-         font-family: 'Helvetica Neue', sans-serif;
-     }
-     .category-header {
-         background: linear-gradient(45deg, #2b5876, #4e4376);
-         padding: 20px;
-         border-radius: 10px;
-         margin: 10px 0;
-     }
-     .scene-card {
-         background: rgba(0,0,0,0.3);
-         padding: 15px;
-         border-radius: 8px;
-         margin: 10px 0;
-         border: 1px solid rgba(255,255,255,0.1);
-     }
-     .media-gallery {
-         display: grid;
-         gap: 1rem;
-         padding: 1rem;
-     }
-     .bike-card {
-         background: rgba(255,255,255,0.05);
-         border-radius: 10px;
-         padding: 15px;
-         transition: transform 0.3s;
-     }
-     .bike-card:hover {
-         transform: scale(1.02);
-     }
-     </style>
- """, unsafe_allow_html=True)
-
-
- # Bike Collections
- bike_collections = {
-     "Celestial Collection 🌌": {
-         "Eclipse Vaulter": {
-             "prompt": """Cinematic shot of a sleek black mountain bike silhouetted against a total solar eclipse.
-             The corona creates an ethereal halo effect, with lens flares accentuating key points of the frame.
-             Dynamic composition shows the bike mid-leap, with stardust particles trailing behind.
-             Camera angle: Low angle, wide shot
-             Lighting: Dramatic rim lighting from eclipse
-             Color palette: Deep purples, cosmic blues, corona gold""",
-             "emoji": "🌑"
-         },
-         "Starlight Leaper": {
-             "prompt": """A black bike performing an epic leap under a vast Milky Way galaxy.
-             Shimmering stars blanket the sky while the bike's wheels leave a trail of stardust.
-             Camera angle: Wide-angle upward shot
-             Lighting: Natural starlight with subtle rim lighting
-             Color palette: Deep blues, silver highlights, cosmic purples""",
-             "emoji": "✨"
-         },
-         "Moonlit Hopper": {
-             "prompt": """A sleek black bike mid-hop over a moonlit meadow,
-             the full moon illuminating the misty surroundings. Fireflies dance around the bike,
-             and soft shadows create a serene yet dynamic atmosphere.
-             Camera angle: Side profile with slight low angle
-             Lighting: Soft moonlight with atmospheric fog
-             Color palette: Silver blues, soft whites, deep shadows""",
-             "emoji": "🌙"
-         }
-     },
-     "Nature-Inspired Collection 🌲": {
-         "Shadow Grasshopper": {
-             "prompt": """A black bike jumping between forest paths,
-             with dappled sunlight streaming through the canopy. Shadows dance on the bike's frame
-             as it soars above mossy logs.
-             Camera angle: Through-the-trees tracking shot
-             Lighting: Natural forest lighting with sun rays
-             Color palette: Forest greens, golden sunlight, deep shadows""",
-             "emoji": "🦗"
-         },
-         "Onyx Leapfrog": {
-             "prompt": """A bike with obsidian-black finish jumping over a sparkling creek,
-             the reflection on the water broken into ripples by the leap. The surrounding forest
-             is vibrant with greens and browns.
-             Camera angle: Low angle from water level
-             Lighting: Golden hour side lighting
-             Color palette: Deep blacks, water blues, forest greens""",
-             "emoji": "🐸"
-         }
-     }
- }
-
-
- # Helper Functions
- def generate_filename(prompt, file_type):
-     """Generate a safe filename using the prompt and file type."""
-     central = pytz.timezone('US/Central')
-     safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
-     replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
-     safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
-     return f"{safe_date_time}_{safe_prompt}.{file_type}"
-
-
- # Function to create and save a file (and avoid the black hole of lost data 🕳)
- def create_file(filename, prompt, response, should_save=True):
-     if not should_save:
-         return
-     with open(filename, 'w', encoding='utf-8') as file:
-         file.write(prompt + "\n\n" + response)
-
-
- def create_and_save_file(content, file_type="md", prompt=None, is_image=False, should_save=True):
-     """Create and save file with proper handling of different types."""
-     if not should_save:
-         return None
-     filename = generate_filename(prompt if prompt else content, file_type)
-     with open(filename, "w", encoding="utf-8") as f:
-         if is_image:
-             f.write(content)
-         else:
-             f.write(prompt + "\n\n" + content if prompt else content)
-     return filename
-
- def get_download_link(file_path):
-     """Create download link for file."""
-     with open(file_path, "rb") as file:
-         contents = file.read()
-     b64 = base64.b64encode(contents).decode()
-     return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}📂</a>'
-
- @st.cache_resource
- def SpeechSynthesis(result):
-     """HTML5 Speech Synthesis."""
-     documentHTML5 = f'''
-     <!DOCTYPE html>
-     <html>
-     <head>
-         <title>Read It Aloud</title>
-         <script type="text/javascript">
-             function readAloud() {{
-                 const text = document.getElementById("textArea").value;
-                 const speech = new SpeechSynthesisUtterance(text);
-                 window.speechSynthesis.speak(speech);
-             }}
-         </script>
-     </head>
-     <body>
-         <h1>🔊 Read It Aloud</h1>
-         <textarea id="textArea" rows="10" cols="80">{result}</textarea>
-         <br>
-         <button onclick="readAloud()">🔊 Read Aloud</button>
-     </body>
-     </html>
-     '''
-     components.html(documentHTML5, width=1280, height=300)

- # Media Processing Functions
- def process_image(image_input, user_prompt):
-     """Process image with GPT-4o vision."""
-     if isinstance(image_input, str):
-         with open(image_input, "rb") as image_file:
-             image_input = image_file.read()
-
-     base64_image = base64.b64encode(image_input).decode("utf-8")
-
-     response = openai_client.chat.completions.create(
-         model=st.session_state["openai_model"],
-         messages=[
-             {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
-             {"role": "user", "content": [
-                 {"type": "text", "text": user_prompt},
-                 {"type": "image_url", "image_url": {
-                     "url": f"data:image/png;base64,{base64_image}"
-                 }}
-             ]}
-         ],
-         temperature=0.0,
-     )
-
-     return response.choices[0].message.content
-
- def process_audio(audio_input, text_input=''):
-     """Process audio with Whisper and GPT."""
-     if isinstance(audio_input, str):
-         with open(audio_input, "rb") as file:
-             audio_input = file.read()
-
-     transcription = openai_client.audio.transcriptions.create(
-         model="whisper-1",
-         file=audio_input,
-     )
-
-     st.session_state.messages.append({"role": "user", "content": transcription.text})
-
-     with st.chat_message("assistant"):
-         st.markdown(transcription.text)
-         SpeechSynthesis(transcription.text)
-
-     filename = generate_filename(transcription.text, "wav")
-     create_and_save_file(audio_input, "wav", transcription.text, True)
-
- def process_video(video_path, seconds_per_frame=1):
-     """Process video files for frame extraction and audio."""
-     base64Frames = []
-     video = cv2.VideoCapture(video_path)
-     total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
-     fps = video.get(cv2.CAP_PROP_FPS)
-     frames_to_skip = int(fps * seconds_per_frame)
-
-     for frame_idx in range(0, total_frames, frames_to_skip):
-         video.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
-         success, frame = video.read()
-         if not success:
-             break
-         _, buffer = cv2.imencode(".jpg", frame)
-         base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
-
-     video.release()
-
-     # Extract audio
-     base_video_path = os.path.splitext(video_path)[0]
-     audio_path = f"{base_video_path}.mp3"
-     try:
-         video_clip = VideoFileClip(video_path)
-         video_clip.audio.write_audiofile(audio_path)
-         video_clip.close()
-     except:
-         st.warning("No audio track found in video")
-         audio_path = None
-
-     return base64Frames, audio_path
-
- def process_video_with_gpt(video_input, user_prompt):
-     """Process video with GPT-4o vision."""
-     base64Frames, audio_path = process_video(video_input)
-
-     response = openai_client.chat.completions.create(
-         model=st.session_state["openai_model"],
-         messages=[
-             {"role": "system", "content": "Analyze the video frames and provide a detailed description."},
-             {"role": "user", "content": [
-                 {"type": "text", "text": user_prompt},
-                 *[{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{frame}"}}
-                   for frame in base64Frames]
-             ]}
-         ]
-     )
-
-     return response.choices[0].message.content
-
-
- def extract_urls(text):
-     try:
-         date_pattern = re.compile(r'### (\d{2} \w{3} \d{4})')
-         abs_link_pattern = re.compile(r'\[(.*?)\]\((https://arxiv\.org/abs/\d+\.\d+)\)')
-         pdf_link_pattern = re.compile(r'\[⬇️\]\((https://arxiv\.org/pdf/\d+\.\d+)\)')
-         title_pattern = re.compile(r'### \d{2} \w{3} \d{4} \| \[(.*?)\]')
-         date_matches = date_pattern.findall(text)
-         abs_link_matches = abs_link_pattern.findall(text)
-         pdf_link_matches = pdf_link_pattern.findall(text)
-         title_matches = title_pattern.findall(text)
-
-         # markdown with the extracted fields
-         markdown_text = ""
-         for i in range(len(date_matches)):
-             date = date_matches[i]
-             title = title_matches[i]
-             abs_link = abs_link_matches[i][1]
-             pdf_link = pdf_link_matches[i]
-             markdown_text += f"**Date:** {date}\n\n"
-             markdown_text += f"**Title:** {title}\n\n"
-             markdown_text += f"**Abstract Link:** [{abs_link}]({abs_link})\n\n"
-             markdown_text += f"**PDF Link:** [{pdf_link}]({pdf_link})\n\n"
-             markdown_text += "---\n\n"
-         return markdown_text
-
-     except:
-         st.write('.')
-         return ''
-
-
- def search_arxiv(query):
-     st.write("Performing AI Lookup...")
-     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
-
-     result1 = client.predict(
-         prompt=query,
-         llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1",
-         stream_outputs=True,
-         api_name="/ask_llm"
-     )
-     st.markdown("### Mixtral-8x7B-Instruct-v0.1 Result")
-     st.markdown(result1)
-
-     result2 = client.predict(
-         prompt=query,
-         llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
-         stream_outputs=True,
-         api_name="/ask_llm"
-     )
-     st.markdown("### Mistral-7B-Instruct-v0.2 Result")
-     st.markdown(result2)
-     combined_result = f"{result1}\n\n{result2}"
-     return combined_result
-
-     #return responseall
-
-
- # Function to generate a filename based on prompt and time (because names matter 🕒)
- def generate_filename(prompt, file_type):
-     central = pytz.timezone('US/Central')
-     safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
-     safe_prompt = re.sub(r'\W+', '_', prompt)[:90]
-     return f"{safe_date_time}_{safe_prompt}.{file_type}"
-
- # Function to create and save a file (and avoid the black hole of lost data 🕳)
- def create_file(filename, prompt, response):
-     with open(filename, 'w', encoding='utf-8') as file:
-         file.write(prompt + "\n\n" + response)
-
-
- def perform_ai_lookup(query):
-     start_time = time.strftime("%Y-%m-%d %H:%M:%S")
-     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
-     response1 = client.predict(
-         query,
-         20,
-         "Semantic Search",
-         "mistralai/Mixtral-8x7B-Instruct-v0.1",
-         api_name="/update_with_rag_md"
-     )
-     Question = '### 🔎 ' + query + '\r\n'  # Format for markdown display with links
-     References = response1[0]
-     ReferenceLinks = extract_urls(References)
-
-     RunSecondQuery = True
-     results = ''
-     if RunSecondQuery:
-         # Search 2 - Retrieve the Summary with Papers Context and Original Query
-         response2 = client.predict(
-             query,
-             "mistralai/Mixtral-8x7B-Instruct-v0.1",
-             True,
-             api_name="/ask_llm"
-         )
-         if len(response2) > 10:
-             Answer = response2
-             SpeechSynthesis(Answer)
-             # Restructure results to follow format of Question, Answer, References, ReferenceLinks
-             results = Question + '\r\n' + Answer + '\r\n' + References + '\r\n' + ReferenceLinks
-             st.markdown(results)
-
-     st.write('🔍 Run of Multi-Agent System Paper Summary Spec is Complete')
-     end_time = time.strftime("%Y-%m-%d %H:%M:%S")
-     start_timestamp = time.mktime(time.strptime(start_time, "%Y-%m-%d %H:%M:%S"))
-     end_timestamp = time.mktime(time.strptime(end_time, "%Y-%m-%d %H:%M:%S"))
-     elapsed_seconds = end_timestamp - start_timestamp
-     st.write(f"Start time: {start_time}")
-     st.write(f"Finish time: {end_time}")
-     st.write(f"Elapsed time: {elapsed_seconds:.2f} seconds")

-     filename = generate_filename(query, "md")
-     create_file(filename, query, results)
-     return results
-
- # Chat Processing Functions
- def process_with_gpt(text_input):
-     """Process text with GPT-4o."""
-     if text_input:
-         st.session_state.messages.append({"role": "user", "content": text_input})
-
-         with st.chat_message("user"):
-             st.markdown(text_input)
-
-         with st.chat_message("assistant"):
-             completion = openai_client.chat.completions.create(
-                 model=st.session_state["openai_model"],
-                 messages=[
-                     {"role": m["role"], "content": m["content"]}
-                     for m in st.session_state.messages
-                 ],
-                 stream=False
-             )
-             return_text = completion.choices[0].message.content
-             st.write("GPT-4o: " + return_text)
-
-             #filename = generate_filename(text_input, "md")
-             filename = generate_filename("GPT-4o: " + return_text, "md")
-             create_file(filename, text_input, return_text)
-             st.session_state.messages.append({"role": "assistant", "content": return_text})
-         return return_text
-
- def process_with_claude(text_input):
-     """Process text with Claude."""
-     if text_input:
-         with st.chat_message("user"):
-             st.markdown(text_input)
-
-         with st.chat_message("assistant"):
-             response = claude_client.messages.create(
-                 model="claude-3-sonnet-20240229",
-                 max_tokens=1000,
-                 messages=[
-                     {"role": "user", "content": text_input}
-                 ]
-             )
-             response_text = response.content[0].text
-             st.write("Claude: " + response_text)
-
-             #filename = generate_filename(text_input, "md")
-             filename = generate_filename("Claude: " + response_text, "md")
-             create_file(filename, text_input, response_text)
-
-             st.session_state.chat_history.append({
-                 "user": text_input,
-                 "claude": response_text
-             })
-         return response_text
-
- # File Management Functions
- def load_file(file_name):
-     """Load file content."""
-     with open(file_name, "r", encoding='utf-8') as file:
-         content = file.read()
-     return content
-
- def create_zip_of_files(files):
-     """Create zip archive of files."""
-     zip_name = "all_files.zip"
-     with zipfile.ZipFile(zip_name, 'w') as zipf:
-         for file in files:
-             zipf.write(file)
-     return zip_name
-
-
- def get_media_html(media_path, media_type="video", width="100%"):
-     """Generate HTML for media player."""
-     media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
-     if media_type == "video":
-         return f'''
-         <video width="{width}" controls autoplay muted loop>
-             <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
-             Your browser does not support the video tag.
-         </video>
-         '''
-     else:  # audio
-         return f'''
-         <audio controls style="width: {width};">
-             <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
-             Your browser does not support the audio element.
-         </audio>
-         '''
-
- def create_media_gallery():
-     """Create the media gallery interface."""
-     st.header("🎬 Media Gallery")
-
-     tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video", "🎨 Scene Generator"])
-
-     with tabs[0]:
-         image_files = glob.glob("*.png") + glob.glob("*.jpg")
-         if image_files:
-             num_cols = st.slider("Number of columns", 1, 5, 3)
-             cols = st.columns(num_cols)
-             for idx, image_file in enumerate(image_files):
-                 with cols[idx % num_cols]:
-                     img = Image.open(image_file)
-                     st.image(img, use_container_width=True)
-
-                     # Add GPT vision analysis option
-                     if st.button(f"Analyze {os.path.basename(image_file)}"):
-                         analysis = process_image(image_file,
-                                                  "Describe this image in detail and identify key elements.")
-                         st.markdown(analysis)
-
-     with tabs[1]:
-         audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
-         for audio_file in audio_files:
-             with st.expander(f"🎵 {os.path.basename(audio_file)}"):
-                 st.markdown(get_media_html(audio_file, "audio"), unsafe_allow_html=True)
-                 if st.button(f"Transcribe {os.path.basename(audio_file)}"):
-                     with open(audio_file, "rb") as f:
-                         transcription = process_audio(f)
-                         st.write(transcription)
-
-     with tabs[2]:
-         video_files = glob.glob("*.mp4")
-         for video_file in video_files:
-             with st.expander(f"🎥 {os.path.basename(video_file)}"):
-                 st.markdown(get_media_html(video_file, "video"), unsafe_allow_html=True)
-                 if st.button(f"Analyze {os.path.basename(video_file)}"):
-                     analysis = process_video_with_gpt(video_file,
-                                                       "Describe what's happening in this video.")
-                     st.markdown(analysis)
-
-     with tabs[3]:
-         for collection_name, bikes in bike_collections.items():
-             st.subheader(collection_name)
-             cols = st.columns(len(bikes))
-
-             for idx, (bike_name, details) in enumerate(bikes.items()):
-                 with cols[idx]:
-                     st.markdown(f"""
-                     <div class='bike-card'>
-                         <h3>{details['emoji']} {bike_name}</h3>
-                         <p>{details['prompt']}</p>
-                     </div>
-                     """, unsafe_allow_html=True)
-
-                     if st.button(f"Generate {bike_name} Scene"):
-                         prompt = details['prompt']
-                         # Here you could integrate with image generation API
-                         st.write(f"Generated scene description for {bike_name}:")
-                         st.write(prompt)
-
- def display_file_manager():
-     """Display file management sidebar with guaranteed unique button keys."""
-     st.sidebar.title("📁 File Management")
-
-     all_files = glob.glob("*.md")
-     all_files.sort(reverse=True)
-
-     if st.sidebar.button("🗑 Delete All", key="delete_all_files_button"):
-         for file in all_files:
-             os.remove(file)
-         st.rerun()
-
-     if st.sidebar.button("⬇️ Download All", key="download_all_files_button"):
-         zip_file = create_zip_of_files(all_files)
-         st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)
-
-     # Create unique keys using file attributes
-     for idx, file in enumerate(all_files):
-         # Get file stats for unique identification
-         file_stat = os.stat(file)
-         unique_id = f"{idx}_{file_stat.st_size}_{file_stat.st_mtime}"
-
-         col1, col2, col3, col4 = st.sidebar.columns([1,3,1,1])
-         with col1:
-             if st.button("🌐", key=f"view_{unique_id}"):
-                 st.session_state.current_file = file
-                 st.session_state.file_content = load_file(file)
-         with col2:
-             st.markdown(get_download_link(file), unsafe_allow_html=True)
-         with col3:
-             if st.button("📂", key=f"edit_{unique_id}"):
-                 st.session_state.current_file = file
-                 st.session_state.file_content = load_file(file)
-         with col4:
-             if st.button("🗑", key=f"delete_{unique_id}"):
-                 os.remove(file)
-                 st.rerun()
-
-
- def main():
-     st.sidebar.markdown("### 🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI")
-
-     # Main navigation
-     tab_main = st.radio("Choose Action:",
-                         ["💬 Chat", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
-                         horizontal=True)
-
-     if tab_main == "💬 Chat":
-         # Model Selection
-         model_choice = st.sidebar.radio(
-             "Choose AI Model:",
-             ["GPT-4o", "Claude-3", "GPT+Claude+Arxiv"]
-         )
-
-         # Chat Interface
-         user_input = st.text_area("Message:", height=100)
-
-         if st.button("Send 📨"):
-             if user_input:
-                 if model_choice == "GPT-4o":
-                     gpt_response = process_with_gpt(user_input)
-                 elif model_choice == "Claude-3":
-                     claude_response = process_with_claude(user_input)
-                 else:  # Both
-                     col1, col2, col3 = st.columns(3)
-                     with col2:
-                         st.subheader("Claude-3.5 Sonnet:")
-                         try:
-                             claude_response = process_with_claude(user_input)
-                         except:
-                             st.write('Claude 3.5 Sonnet out of tokens.')
-                     with col1:
-                         st.subheader("GPT-4o Omni:")
-                         try:
-                             gpt_response = process_with_gpt(user_input)
-                         except:
-                             st.write('GPT 4o out of tokens')
-                     with col3:
-                         st.subheader("Arxiv and Mistral Research:")
-                         with st.spinner("Searching ArXiv..."):
-                             #results = search_arxiv(user_input)
-                             results = perform_ai_lookup(user_input)
-                             st.markdown(results)
-
-         # Display Chat History
-         st.subheader("Chat History 📜")
-         tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])
-
-         with tab1:
-             for chat in st.session_state.chat_history:
-                 st.text_area("You:", chat["user"], height=100)
-                 st.text_area("Claude:", chat["claude"], height=200)
-                 st.markdown(chat["claude"])
-
-         with tab2:
-             for message in st.session_state.messages:
-                 with st.chat_message(message["role"]):
-                     st.markdown(message["content"])
-
-     elif tab_main == "📸 Media Gallery":
-         create_media_gallery()
-
-     elif tab_main == "🔍 Search ArXiv":
-         query = st.text_input("Enter your research query:")
-         if query:
-             with st.spinner("Searching ArXiv..."):
-                 results = search_arxiv(query)
-                 st.markdown(results)
-
-     elif tab_main == "📝 File Editor":
-         if hasattr(st.session_state, 'current_file'):
-             st.subheader(f"Editing: {st.session_state.current_file}")
-             new_content = st.text_area("Content:", st.session_state.file_content, height=300)
-             if st.button("Save Changes"):
-                 with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
-                     file.write(new_content)
-                 st.success("File updated successfully!")
-
-     # Always show file manager in sidebar
-     display_file_manager()
-
- if __name__ == "__main__":
-     main()
-
- # Speech Recognition HTML Component
 speech_recognition_html = """
 <!DOCTYPE html>
 <html>
 <head>
     <title>Continuous Speech Demo</title>
-     <style>
-         body {
-             font-family: sans-serif;
-             padding: 20px;
-             max-width: 800px;
-             margin: 0 auto;
-         }
-         button {
-             padding: 10px 20px;
-             margin: 10px 5px;
-             font-size: 16px;
-         }
-         #status {
-             margin: 10px 0;
-             padding: 10px;
-             background: #e8f5e9;
-             border-radius: 4px;
-         }
-         #output {
-             white-space: pre-wrap;
-             padding: 15px;
-             background: #f5f5f5;
-             border-radius: 4px;
-             margin: 10px 0;
-             min-height: 100px;
-             max-height: 400px;
-             overflow-y: auto;
-         }
-         .controls {
-             margin: 10px 0;
-         }
-     </style>
- </head>
- <body>
-     <div class="controls">
-         <button id="start">Start Listening</button>
-         <button id="stop" disabled>Stop Listening</button>
-         <button id="clear">Clear Text</button>
-     </div>
-     <div id="status">Ready</div>
-     <div id="output"></div>
-
    <script>
        if (!('webkitSpeechRecognition' in window)) {
            alert('Speech recognition not supported');
        } else {
            const recognition = new webkitSpeechRecognition();
-            const startButton = document.getElementById('start');
-            const stopButton = document.getElementById('stop');
-            const clearButton = document.getElementById('clear');
-            const status = document.getElementById('status');
-            const output = document.getElementById('output');
            let fullTranscript = '';
-            let lastUpdateTime = Date.now();
-
-            // Configure recognition
            recognition.continuous = true;
            recognition.interimResults = true;

-            // Function to start recognition
-            const startRecognition = () => {
-                try {
-                    recognition.start();
-                    status.textContent = 'Listening...';
-                    startButton.disabled = true;
-                    stopButton.disabled = false;
-                } catch (e) {
-                    console.error(e);
-                    status.textContent = 'Error: ' + e.message;
-                }
-            };
-
-            // Auto-start on load
-            window.addEventListener('load', () => {
-                setTimeout(startRecognition, 1000);
-            });
-
-            startButton.onclick = startRecognition;
-
-            stopButton.onclick = () => {
-                recognition.stop();
-                status.textContent = 'Stopped';
-                startButton.disabled = false;
-                stopButton.disabled = true;
-            };
-
-            clearButton.onclick = () => {
-                fullTranscript = '';
-                output.textContent = '';
-                window.parent.postMessage({
-                    type: 'clear_transcript',
-                }, '*');
-            };
-
            recognition.onresult = (event) => {
-                let interimTranscript = '';
                let finalTranscript = '';
-
                for (let i = event.resultIndex; i < event.results.length; i++) {
-                    const transcript = event.results[i][0].transcript;
                    if (event.results[i].isFinal) {
-                        finalTranscript += transcript + '\\n';
-                    } else {
-                        interimTranscript += transcript;
                    }
                }
-
-                if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
-                    if (finalTranscript) {
-                        fullTranscript += finalTranscript;
-                        // Send to Streamlit
-                        window.parent.postMessage({
-                            type: 'final_transcript',
-                            text: finalTranscript
-                        }, '*');
-                    }
-                    lastUpdateTime = Date.now();
                }
-
-                output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
-                output.scrollTop = output.scrollHeight;
            };

-            recognition.onend = () => {
-                if (!stopButton.disabled) {
-                    try {
-                        recognition.start();
-                        console.log('Restarted recognition');
-                    } catch (e) {
-                        console.error('Failed to restart recognition:', e);
-                        status.textContent = 'Error restarting: ' + e.message;
-                        startButton.disabled = false;
-                        stopButton.disabled = true;
-                    }
-                }
-            };
-
-            recognition.onerror = (event) => {
-                console.error('Recognition error:', event.error);
-                status.textContent = 'Error: ' + event.error;
-
-                if (event.error === 'not-allowed' || event.error === 'service-not-allowed') {
-                    startButton.disabled = false;
-                    stopButton.disabled = true;
-                }
-            };
        }
    </script>
- </body>

 </html>
 """

 # Helper Functions
- def generate_filename(prompt, file_type):
-     central = pytz.timezone('US/Central')
-     safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
-     replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
-     safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
-     return f"{safe_date_time}_{safe_prompt}.{file_type}"
-
- # File Management Functions
- def load_file(file_name):
-     """Load file content."""
-     with open(file_name, "r", encoding='utf-8') as file:
-         content = file.read()
-     return content
-
- def create_zip_of_files(files):
-     """Create zip archive of files."""
-     zip_name = "all_files.zip"
-     with zipfile.ZipFile(zip_name, 'w') as zipf:
-         for file in files:
-             zipf.write(file)
-     return zip_name
-
- def get_download_link(file):
-     """Create download link for file."""
-     with open(file, "rb") as f:
-         contents = f.read()
-     b64 = base64.b64encode(contents).decode()
-     return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file)}">Download {os.path.basename(file)}📂</a>'

 def display_file_manager():
     """Display file management sidebar."""
     st.sidebar.title("📁 File Management")
     all_files = glob.glob("*.md")
     all_files.sort(reverse=True)
@@ -949,189 +126,61 @@ def display_file_manager():
         st.rerun()

     if st.sidebar.button("⬇️ Download All"):
-         zip_file = create_zip_of_files(all_files)
-         st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)

     for file in all_files:
-         col1, col2, col3, col4 = st.sidebar.columns([1,3,1,1])
-         with col1:
-             if st.button("🌐", key="view_"+file):
-                 st.session_state.current_file = file
-                 st.session_state.file_content = load_file(file)
-         with col2:
-             st.markdown(get_download_link(file), unsafe_allow_html=True)
-         with col3:
-             if st.button("📂", key="edit_"+file):
-                 st.session_state.current_file = file
-                 st.session_state.file_content = load_file(file)
-         with col4:
-             if st.button("🗑", key="delete_"+file):
-                 os.remove(file)
-                 st.rerun()
-
- def create_media_gallery():
-     """Create the media gallery interface."""
-     st.header("🎬 Media Gallery")
-
-     tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video", "🎨 Scene Generator"])
-
-     with tabs[0]:
-         image_files = glob.glob("*.png") + glob.glob("*.jpg")
-         if image_files:
-             num_cols = st.slider("Number of columns", 1, 5, 3)
-             cols = st.columns(num_cols)
-             for idx, image_file in enumerate(image_files):
-                 with cols[idx % num_cols]:
-                     img = Image.open(image_file)
-                     st.image(img, use_container_width=True)
-
-                     # Add GPT vision analysis option
-                     if st.button(f"Analyze {os.path.basename(image_file)}"):
-                         analysis = process_image(image_file,
-                                                  "Describe this image in detail and identify key elements.")
-                         st.markdown(analysis)
-
-     with tabs[1]:
-         audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
-         for audio_file in audio_files:
-             with st.expander(f"🎵 {os.path.basename(audio_file)}"):
-                 st.markdown(get_media_html(audio_file, "audio"), unsafe_allow_html=True)
-                 if st.button(f"Transcribe {os.path.basename(audio_file)}"):
-                     with open(audio_file, "rb") as f:
-                         transcription = process_audio(f)
-                         st.write(transcription)
-
-     with tabs[2]:
-         video_files = glob.glob("*.mp4")
-         for video_file in video_files:
-             with st.expander(f"🎥 {os.path.basename(video_file)}"):
-                 st.markdown(get_media_html(video_file, "video"), unsafe_allow_html=True)
-                 if st.button(f"Analyze {os.path.basename(video_file)}"):
-                     analysis = process_video_with_gpt(video_file,
-                                                       "Describe what's happening in this video.")
-                     st.markdown(analysis)
-
-     with tabs[3]:
-         for collection_name, bikes in bike_collections.items():
-             st.subheader(collection_name)
-             cols = st.columns(len(bikes))
-
-             for idx, (bike_name, details) in enumerate(bikes.items()):
-                 with cols[idx]:
-                     st.markdown(f"""
-                     <div class='bike-card'>
-                         <h3>{details['emoji']} {bike_name}</h3>
-                         <p>{details['prompt']}</p>
-                     </div>
-                     """, unsafe_allow_html=True)
-
-                     if st.button(f"Generate {bike_name} Scene"):
-                         prompt = details['prompt']
-                         # Here you could integrate with image generation API
-                         st.write(f"Generated scene description for {bike_name}:")
-                         st.write(prompt)

- def get_media_html(media_path, media_type="video", width="100%"):
-     """Generate HTML for media player."""
-     media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
-     if media_type == "video":
-         return f'''
-         <video width="{width}" controls autoplay muted loop>
-             <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
-             Your browser does not support the video tag.
-         </video>
-         '''
-     else:  # audio
-         return f'''
-         <audio controls style="width: {width};">
-             <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
-             Your browser does not support the audio element.
-         </audio>
-         '''
-
 def main():
     st.sidebar.markdown("### 🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI")
-
-     # Main navigation
-     tab_main = st.radio("Choose Action:",
-                         ["🎤 Voice Input", "💬 Chat", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
-                         horizontal=True)
-
     if tab_main == "🎤 Voice Input":
         st.subheader("Voice Recognition")
-
-         # Display speech recognition component
-         speech_component = st.components.v1.html(speech_recognition_html, height=400)
-
-         # Handle speech recognition output
-         if speech_component:
-             try:
-                 data = speech_component
-                 if isinstance(data, dict):
-                     if data.get('type') == 'final_transcript':
-                         text = data.get('text', '').strip()
-                         if text:
-                             st.session_state.last_voice_input = text
-
-                             # Process voice input with AI
-                             st.subheader("AI Response to Voice Input:")
-
-                             col1, col2, col3 = st.columns(3)
-                             with col2:
-                                 st.write("Claude-3.5 Sonnet:")
-                                 try:
-                                     claude_response = process_with_claude(text)
-                                 except:
-                                     st.write('Claude 3.5 Sonnet out of tokens.')
-                             with col1:
-                                 st.write("GPT-4o Omni:")
-                                 try:
-                                     gpt_response = process_with_gpt(text)
-                                 except:
-                                     st.write('GPT 4o out of tokens')
-                             with col3:
-                                 st.write("Arxiv and Mistral Research:")
-                                 with st.spinner("Searching ArXiv..."):
-                                     results = perform_ai_lookup(text)
-                                     st.markdown(results)
-
-                     elif data.get('type') == 'clear_transcript':
-                         st.session_state.last_voice_input = ""
-                         st.experimental_rerun()
-
-             except Exception as e:
-                 st.error(f"Error processing voice input: {e}")
-
-         # Display last voice input
-         if st.session_state.last_voice_input:
-             st.text_area("Last Voice Input:", st.session_state.last_voice_input, height=100)
-
-     # [Rest of the main function remains the same]
     elif tab_main == "💬 Chat":
-         # [Previous chat interface code]
-         pass
-
-     elif tab_main == "📸 Media Gallery":
-         create_media_gallery()

     elif tab_main == "🔍 Search ArXiv":
         query = st.text_input("Enter your research query:")
         if query:
-             with st.spinner("Searching ArXiv..."):
-                 results = search_arxiv(query)
-                 st.markdown(results)
-
-     elif tab_main == "📝 File Editor":
-         if hasattr(st.session_state, 'current_file'):
-             st.subheader(f"Editing: {st.session_state.current_file}")
-             new_content = st.text_area("Content:", st.session_state.file_content, height=300)
-             if st.button("Save Changes"):
-                 with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
-                     file.write(new_content)
-                 st.success("File updated successfully!")

-     # Always show file manager in sidebar
     display_file_manager()

 if __name__ == "__main__":
-     main()
 
 import openai
 import base64
 from datetime import datetime
 import pytz
+ import os
 import re
 import time
 import zipfile
 from io import BytesIO
+ from gradio_client import Client
 from PIL import Image
+ import glob
+ import streamlit.components.v1 as components

 # 1. Configuration and Setup
 Site_Name = '🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI'
 
 )

 # 2. Load environment variables and initialize clients
 openai.api_key = os.getenv('OPENAI_API_KEY')
 anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")

+ openai_client = openai.OpenAI(api_key=openai.api_key)
+ claude_client = anthropic.Anthropic(api_key=anthropic_key)

 # Initialize session states
+ if 'voice_transcript' not in st.session_state:
+     st.session_state.voice_transcript = ""
 if "chat_history" not in st.session_state:
     st.session_state.chat_history = []
 if "messages" not in st.session_state:
     st.session_state.messages = []
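The previous version guarded against missing environment variables by falling back to st.secrets; the new setup drops that guard, so a missing variable yields clients constructed with None keys. A minimal sketch restoring the fallback (secret names taken from the removed code) would sit right after the two os.getenv calls:

# Sketch: fall back to Streamlit secrets when env vars are absent
# (secret names as in the previous version of this file).
if not openai.api_key:
    openai.api_key = st.secrets['OPENAI_API_KEY']
if not anthropic_key:
    anthropic_key = st.secrets['ANTHROPIC_API_KEY']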
 
+ # 3. Speech Recognition HTML Component
 speech_recognition_html = """
 <!DOCTYPE html>
 <html>
 <head>
     <title>Continuous Speech Demo</title>
    <script>
        if (!('webkitSpeechRecognition' in window)) {
            alert('Speech recognition not supported');
        } else {
            const recognition = new webkitSpeechRecognition();
            let fullTranscript = '';
            recognition.continuous = true;
            recognition.interimResults = true;

            recognition.onresult = (event) => {
                let finalTranscript = '';
                for (let i = event.resultIndex; i < event.results.length; i++) {
                    if (event.results[i].isFinal) {
+                        finalTranscript += event.results[i][0].transcript + '\\n';
                    }
                }
+                if (finalTranscript) {
+                    fullTranscript += finalTranscript;
+                    window.parent.postMessage({
+                        type: 'final_transcript',
+                        text: finalTranscript
+                    }, '*');
                }
            };

+            recognition.start();
        }
    </script>
+ </head>
+ <body></body>
 </html>
 """
 # Helper Functions
+ def process_with_gpt(text_input):
+     if text_input:
+         # Chat models go through chat.completions; openai_client.Completion
+         # does not exist in the v1 SDK and would raise at runtime.
+         completion = openai_client.chat.completions.create(
+             model="gpt-4o-2024-05-13",
+             messages=[{"role": "user", "content": text_input}],
+             max_tokens=500,
+         )
+         return completion.choices[0].message.content

+ def process_with_claude(text_input):
+     if text_input:
+         # The Anthropic SDK exposes messages.create, not Completion.create.
+         response = claude_client.messages.create(
+             model="claude-3-sonnet-20240229",
+             max_tokens=1000,
+             messages=[{"role": "user", "content": text_input}],
+         )
+         return response.content[0].text

+ def perform_ai_lookup(query):
+     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+     response = client.predict(
+         prompt=query,
+         api_name="/ask_llm"
+     )
+     return response
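The removed search_arxiv helper passed an explicit model to the same /ask_llm endpoint; if the endpoint still expects those arguments rather than defaults, the call would look like this sketch (argument names copied from the removed code):

# Sketch: explicit /ask_llm arguments as used by the removed search_arxiv.
response = client.predict(
    prompt=query,
    llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1",
    stream_outputs=True,
    api_name="/ask_llm"
)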
 
 def display_file_manager():
     """Display file management sidebar."""
     st.sidebar.title("📁 File Management")
     all_files = glob.glob("*.md")
     all_files.sort(reverse=True)

         st.rerun()

     if st.sidebar.button("⬇️ Download All"):
+         with zipfile.ZipFile("all_files.zip", 'w') as zipf:
+             for file in all_files:
+                 zipf.write(file)
+         # Streamlit does not serve files from the working directory, so a bare
+         # href="all_files.zip" would 404; embed the zip as a base64 data URI.
+         b64 = base64.b64encode(open("all_files.zip", "rb").read()).decode()
+         st.sidebar.markdown(f'<a href="data:application/zip;base64,{b64}" download="all_files.zip">Download All Files</a>', unsafe_allow_html=True)

     for file in all_files:
+         st.sidebar.write(file)
+ # Main Function
 def main():
     st.sidebar.markdown("### 🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI")
+     tab_main = st.radio("Choose Action:", ["🎤 Voice Input", "💬 Chat", "🔍 Search ArXiv"], horizontal=True)
+
     if tab_main == "🎤 Voice Input":
         st.subheader("Voice Recognition")
+         st.components.v1.html(speech_recognition_html, height=300)
+
+         # Note: components.html cannot push the postMessage transcript back
+         # into session state; see the component sketch above.
+         if st.session_state.voice_transcript:
+             st.text_area("Transcript", st.session_state.voice_transcript, height=100)
+
+         if st.button("Search with GPT"):
+             st.subheader("GPT-4o Response")
+             gpt_response = process_with_gpt(st.session_state.voice_transcript)
+             st.write(gpt_response)
+
+         if st.button("Search with Claude"):
+             st.subheader("Claude Response")
+             claude_response = process_with_claude(st.session_state.voice_transcript)
+             st.write(claude_response)
+
+         if st.button("Search ArXiv"):
+             st.subheader("ArXiv Search Results")
+             arxiv_results = perform_ai_lookup(st.session_state.voice_transcript)
+             st.write(arxiv_results)
+
+         if st.button("Clear Transcript"):
+             st.session_state.voice_transcript = ""
+
     elif tab_main == "💬 Chat":
+         st.subheader("Chat")
+         user_input = st.text_area("Your Message", height=100)
+         if st.button("Send"):
+             if user_input:
+                 gpt_response = process_with_gpt(user_input)
+                 st.write("GPT Response:", gpt_response)
+                 claude_response = process_with_claude(user_input)
+                 st.write("Claude Response:", claude_response)
+
     elif tab_main == "🔍 Search ArXiv":
         query = st.text_input("Enter your research query:")
         if query:
+             results = perform_ai_lookup(query)
+             st.write(results)

     display_file_manager()

 if __name__ == "__main__":
+     main()
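For a quick sanity check of the new helpers outside the Streamlit UI, they can be exercised directly; a sketch, assuming OPENAI_API_KEY and ANTHROPIC_API_KEY_3 are set in the environment:

# Sketch: REPL-style smoke test for the three helper functions.
if os.getenv('OPENAI_API_KEY') and os.getenv('ANTHROPIC_API_KEY_3'):
    print(process_with_gpt("Say hello in five words."))
    print(process_with_claude("Say hello in five words."))
    print(perform_ai_lookup("multi-agent systems"))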