awacke1 committed on
Commit b9432f3 Β· verified Β· 1 Parent(s): 9cd5dd5

Update app.py

Files changed (1)
  1. app.py +175 -446
app.py CHANGED
@@ -1,525 +1,254 @@
  import streamlit as st
- import anthropic, openai, base64, cv2, glob, json, math, os, pytz, random, re, requests, textract, time, zipfile
- import plotly.graph_objects as go
- import streamlit.components.v1 as components
  from datetime import datetime
- from audio_recorder_streamlit import audio_recorder
- from bs4 import BeautifulSoup
- from collections import defaultdict, deque
  from dotenv import load_dotenv
  from gradio_client import Client
- from huggingface_hub import InferenceClient
- from io import BytesIO
  from PIL import Image
- from PyPDF2 import PdfReader
- from urllib.parse import quote
- from xml.etree import ElementTree as ET
  from openai import OpenAI
- import extra_streamlit_components as stx
- from streamlit.runtime.scriptrunner import get_script_run_ctx
- import asyncio
- import edge_tts

- # πŸ”§ Config & Setup
  st.set_page_config(
-     page_title="🚲BikeAIπŸ† Claude/GPT Research",
-     page_icon="πŸš²πŸ†",
-     layout="wide",
-     initial_sidebar_state="auto",
-     menu_items={
-         'Get Help': 'https://huggingface.co/awacke1',
-         'Report a bug': 'https://huggingface.co/spaces/awacke1',
-         'About': "🚲BikeAIπŸ† Claude/GPT Research AI"
-     }
  )
  load_dotenv()

- openai_api_key = os.getenv('OPENAI_API_KEY', "")
- anthropic_key = os.getenv('ANTHROPIC_API_KEY_3', "")
- if 'OPENAI_API_KEY' in st.secrets:
-     openai_api_key = st.secrets['OPENAI_API_KEY']
- if 'ANTHROPIC_API_KEY' in st.secrets:
-     anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
-
- openai.api_key = openai_api_key
  claude_client = anthropic.Anthropic(api_key=anthropic_key)
- openai_client = OpenAI(api_key=openai.api_key, organization=os.getenv('OPENAI_ORG_ID'))
- HF_KEY = os.getenv('HF_KEY')
- API_URL = os.getenv('API_URL')
-
- if 'transcript_history' not in st.session_state:
-     st.session_state['transcript_history'] = []
- if 'chat_history' not in st.session_state:
-     st.session_state['chat_history'] = []
- if 'openai_model' not in st.session_state:
-     st.session_state['openai_model'] = "gpt-4o-2024-05-13"
- if 'messages' not in st.session_state:
-     st.session_state['messages'] = []
- if 'last_voice_input' not in st.session_state:
-     st.session_state['last_voice_input'] = ""
- if 'editing_file' not in st.session_state:
-     st.session_state['editing_file'] = None
- if 'edit_new_name' not in st.session_state:
-     st.session_state['edit_new_name'] = ""
- if 'edit_new_content' not in st.session_state:
-     st.session_state['edit_new_content'] = ""
- if 'viewing_prefix' not in st.session_state:
-     st.session_state['viewing_prefix'] = None
- if 'should_rerun' not in st.session_state:
-     st.session_state['should_rerun'] = False
- if 'old_val' not in st.session_state:
-     st.session_state['old_val'] = None
-
- # 🎨 Minimal Custom CSS
- st.markdown("""
- <style>
- .main { background: linear-gradient(to right, #1a1a1a, #2d2d2d); color: #fff; }
- .stMarkdown { font-family: 'Helvetica Neue', sans-serif; }
- .stButton>button {
-     margin-right: 0.5rem;
- }
- </style>
- """, unsafe_allow_html=True)
-
- FILE_EMOJIS = {
-     "md": "πŸ“",
-     "mp3": "🎡",
- }
-
- def clean_for_speech(text: str) -> str:
-     text = text.replace("\n", " ")
-     text = text.replace("</s>", " ")
-     text = text.replace("#", "")
-     # Remove links
-     text = re.sub(r"\(https?:\/\/[^\)]+\)", "", text)
-     text = re.sub(r"\s+", " ", text).strip()
-     return text
  def generate_filename(content, file_type="md"):
      prefix = datetime.now().strftime("%y%m_%H%M") + "_"
-     words = re.findall(r"\w+", content)
-     name_text = '_'.join(words[:3]) if words else 'file'
-     filename = f"{prefix}{name_text}.{file_type}"
-     return filename

  def create_file(prompt, response, file_type="md"):
-     filename = generate_filename(response.strip() if response.strip() else prompt.strip(), file_type)
      with open(filename, 'w', encoding='utf-8') as f:
-         f.write(prompt + "\n\n" + response)
      return filename

- def get_download_link(file):
-     with open(file, "rb") as f:
-         b64 = base64.b64encode(f.read()).decode()
-     return f'<a href="data:file/zip;base64,{b64}" download="{os.path.basename(file)}">πŸ“‚ Download {os.path.basename(file)}</a>'
-
- @st.cache_resource
- def speech_synthesis_html(result):
-     html_code = f"""
-     <html><body>
-     <script>
-     var msg = new SpeechSynthesisUtterance("{result.replace('"', '')}");
-     window.speechSynthesis.speak(msg);
-     </script>
-     </body></html>
-     """
-     components.html(html_code, height=0)

  async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=0):
      text = clean_for_speech(text)
      if not text.strip():
          return None
-     rate_str = f"{rate:+d}%"
-     pitch_str = f"{pitch:+d}Hz"
-     communicate = edge_tts.Communicate(text, voice, rate=rate_str, pitch=pitch_str)
-     out_fn = generate_filename(text,"mp3")
      await communicate.save(out_fn)
      return out_fn

  def speak_with_edge_tts(text, voice="en-US-AriaNeural", rate=0, pitch=0):
      return asyncio.run(edge_tts_generate_audio(text, voice, rate, pitch))

- def play_and_download_audio(file_path):
-     if file_path and os.path.exists(file_path):
-         st.audio(file_path)
-         dl_link = f'<a href="data:audio/mpeg;base64,{base64.b64encode(open(file_path,"rb").read()).decode()}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}</a>'
-         st.markdown(dl_link, unsafe_allow_html=True)

  def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True, full_audio=False):
      start = time.time()
      client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
-     r = client.predict(q,20,"Semantic Search","mistralai/Mixtral-8x7B-Instruct-v0.1",api_name="/update_with_rag_md")
-     refs = r[0]
-     r2 = client.predict(q,"mistralai/Mixtral-8x7B-Instruct-v0.1",True,api_name="/ask_llm")
-     result = f"### πŸ”Ž {q}\n\n{r2}\n\n{refs}"

      st.markdown(result)

-     # Generate full audio version if requested
      if full_audio:
-         complete_text = f"Complete response for query: {q}. {clean_for_speech(r2)} {clean_for_speech(refs)}"
-         audio_file_full = speak_with_edge_tts(complete_text)
          st.write("### πŸ“š Complete Audio Response")
-         play_and_download_audio(audio_file_full)

      if vocal_summary:
-         main_text = clean_for_speech(r2)
-         audio_file_main = speak_with_edge_tts(main_text)
-         st.write("### πŸŽ™οΈ Vocal Summary (Short Answer)")
-         play_and_download_audio(audio_file_main)

      if extended_refs:
-         summaries_text = "Here are the summaries from the references: " + refs.replace('"','')
-         summaries_text = clean_for_speech(summaries_text)
-         audio_file_refs = speak_with_edge_tts(summaries_text)
-         st.write("### πŸ“œ Extended References & Summaries")
-         play_and_download_audio(audio_file_refs)

      if titles_summary:
-         titles = []
-         for line in refs.split('\n'):
-             m = re.search(r"\[([^\]]+)\]", line)
-             if m:
-                 titles.append(m.group(1))
          if titles:
-             titles_text = "Here are the titles of the papers: " + ", ".join(titles)
-             titles_text = clean_for_speech(titles_text)
-             audio_file_titles = speak_with_edge_tts(titles_text)
              st.write("### πŸ”– Paper Titles")
-             play_and_download_audio(audio_file_titles)

-     elapsed = time.time()-start
-     st.write(f"**Total Elapsed:** {elapsed:.2f} s")
-     create_file(q, result, "md")
      return result
- def process_image(image_path, user_prompt):
-     with open(image_path, "rb") as imgf:
-         image_data = imgf.read()
-     b64img = base64.b64encode(image_data).decode("utf-8")
-     resp = openai_client.chat.completions.create(
-         model=st.session_state["openai_model"],
-         messages=[
-             {"role": "system", "content": "You are a helpful assistant."},
-             {"role": "user", "content": [
-                 {"type": "text", "text": user_prompt},
-                 {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64img}"}}
-             ]}
-         ],
-         temperature=0.0,
-     )
-     return resp.choices[0].message.content
-
- def process_audio(audio_path):
-     with open(audio_path, "rb") as f:
-         transcription = openai_client.audio.transcriptions.create(model="whisper-1", file=f)
-     st.session_state.messages.append({"role": "user", "content": transcription.text})
-     return transcription.text
-
- def process_video(video_path, seconds_per_frame=1):
-     vid = cv2.VideoCapture(video_path)
-     total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
-     fps = vid.get(cv2.CAP_PROP_FPS)
-     skip = int(fps*seconds_per_frame)
-     frames_b64 = []
-     for i in range(0, total, skip):
-         vid.set(cv2.CAP_PROP_POS_FRAMES, i)
-         ret, frame = vid.read()
-         if not ret: break
-         _, buf = cv2.imencode(".jpg", frame)
-         frames_b64.append(base64.b64encode(buf).decode("utf-8"))
-     vid.release()
-     return frames_b64
-
- def process_video_with_gpt(video_path, prompt):
-     frames = process_video(video_path)
-     resp = openai_client.chat.completions.create(
-         model=st.session_state["openai_model"],
-         messages=[
-             {"role":"system","content":"Analyze video frames."},
-             {"role":"user","content":[
-                 {"type":"text","text":prompt},
-                 *[{"type":"image_url","image_url":{"url":f"data:image/jpeg;base64,{fr}"}} for fr in frames]
-             ]}
-         ]
-     )
-     return resp.choices[0].message.content
-
  def process_with_gpt(text):
-     if not text: return
      st.session_state.messages.append({"role":"user","content":text})
-     with st.chat_message("user"):
-         st.markdown(text)
      with st.chat_message("assistant"):
-         c = openai_client.chat.completions.create(
              model=st.session_state["openai_model"],
              messages=st.session_state.messages,
              stream=False
          )
-         ans = c.choices[0].message.content
-         st.write("GPT-4o: " + ans)
-         create_file(text, ans, "md")
      st.session_state.messages.append({"role":"assistant","content":ans})
      return ans

  def process_with_claude(text):
-     if not text: return
-     with st.chat_message("user"):
-         st.markdown(text)
      with st.chat_message("assistant"):
-         r = claude_client.messages.create(
              model="claude-3-sonnet-20240229",
              max_tokens=1000,
              messages=[{"role":"user","content":text}]
          )
-         ans = r.content[0].text
-         st.write("Claude-3.5: " + ans)
-         create_file(text, ans, "md")
      st.session_state.chat_history.append({"user":text,"claude":ans})
      return ans

- def create_zip_of_files(md_files, mp3_files):
-     md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
-     all_files = md_files + mp3_files
-     if not all_files:
-         return None
-     stems = [os.path.splitext(os.path.basename(f))[0] for f in all_files]
-     joined = "_".join(stems)
-     if len(joined) > 50:
-         joined = joined[:50] + "_etc"
-     zip_name = f"{joined}.zip"
-     with zipfile.ZipFile(zip_name,'w') as z:
-         for f in all_files:
-             z.write(f)
-     return zip_name
-
- def load_files_for_sidebar():
-     md_files = glob.glob("*.md")
-     mp3_files = glob.glob("*.mp3")
-
-     md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
-
-     all_files = md_files + mp3_files
-
-     groups = defaultdict(list)
-     for f in all_files:
-         fname = os.path.basename(f)
-         prefix = fname[:10]
-         groups[prefix].append(f)
-
-     for prefix in groups:
-         groups[prefix].sort(key=lambda x: os.path.getmtime(x), reverse=True)
-
-     sorted_prefixes = sorted(groups.keys(), key=lambda pre: max(os.path.getmtime(x) for x in groups[pre]), reverse=True)
-     return groups, sorted_prefixes
-
- def extract_keywords_from_md(files):
-     text = ""
-     for f in files:
-         if f.endswith(".md"):
-             c = open(f,'r',encoding='utf-8').read()
-             text += " " + c
-     words = re.findall(r"\w+", text.lower())
-     unique_words = []
-     for w in words:
-         if w not in unique_words:
-             unique_words.append(w)
-             if len(unique_words) == 5:
-                 break
-     return unique_words
-
- def display_file_manager_sidebar(groups, sorted_prefixes):
-     st.sidebar.title("🎡 Audio & Document Manager")
-
-     all_md = []
-     all_mp3 = []
-     for prefix in groups:
-         for f in groups[prefix]:
-             if f.endswith(".md"):
-                 all_md.append(f)
-             elif f.endswith(".mp3"):
-                 all_mp3.append(f)
-
-     top_bar = st.sidebar.columns(3)
-     with top_bar[0]:
-         if st.button("πŸ—‘ Del All MD"):
-             for f in all_md:
-                 os.remove(f)
-             st.session_state.should_rerun = True
-     with top_bar[1]:
-         if st.button("πŸ—‘ Del All MP3"):
-             for f in all_mp3:
-                 os.remove(f)
-             st.session_state.should_rerun = True
-     with top_bar[2]:
-         if st.button("⬇️ Zip All"):
-             z = create_zip_of_files(all_md, all_mp3)
-             if z:
-                 st.sidebar.markdown(get_download_link(z),unsafe_allow_html=True)
-
-     for prefix in sorted_prefixes:
-         files = groups[prefix]
-         kw = extract_keywords_from_md(files)
-         keywords_str = " ".join(kw) if kw else "No Keywords"
-         with st.sidebar.expander(f"{prefix} Files ({len(files)}) - Keywords: {keywords_str}", expanded=True):
-             c1,c2 = st.columns(2)
-             with c1:
-                 if st.button("πŸ‘€View Group", key="view_group_"+prefix):
-                     st.session_state.viewing_prefix = prefix
-             with c2:
-                 if st.button("πŸ—‘Del Group", key="del_group_"+prefix):
-                     for f in files:
-                         os.remove(f)
-                     st.success(f"Deleted all files in group {prefix} successfully!")
-                     st.session_state.should_rerun = True
-
-             for f in files:
-                 fname = os.path.basename(f)
-                 ctime = datetime.fromtimestamp(os.path.getmtime(f)).strftime("%Y-%m-%d %H:%M:%S")
-                 st.write(f"**{fname}** - {ctime}")
-
- def run_selected_model(option, user_input):
-     user_input = user_input.strip()
-     if option == "Arxiv":
-         st.subheader("Arxiv Only Results:")
-         perform_ai_lookup(user_input, vocal_summary=True, extended_refs=False, titles_summary=True)
-     elif option == "GPT-4o":
-         process_with_gpt(user_input)
-     elif option == "Claude-3.5":
-         process_with_claude(user_input)
  def main():
-     st.sidebar.markdown("### 🚲BikeAIπŸ† Multi-Agent Research AI")
-     tab_main = st.radio("Action:",["🎀 Voice Input","πŸ“Έ Media Gallery","πŸ” Search ArXiv","πŸ“ File Editor"],horizontal=True)
-
-     mycomponent = components.declare_component("mycomponent", path="mycomponent")
-     val = mycomponent(my_input_value="Hello")
-
-     # Show input in a text box for editing if detected
-     if val:
-         val_stripped = val.replace('\n', ' ')
-         edited_input = st.text_area("Edit your detected input:", value=val_stripped, height=100)
-         run_option = st.selectbox("Select AI Model:", ["Arxiv", "GPT-4o", "Claude-3.5"])
-         col1, col2 = st.columns(2)
-         with col1:
-             autorun = st.checkbox("AutoRun on input change", value=False)
-         with col2:
-             full_audio = st.checkbox("Generate Complete Audio", value=False,
-                                      help="Generate audio for the complete response including all papers and summaries")
-
-         input_changed = (val != st.session_state.old_val)
-
-         if autorun and input_changed:
-             st.session_state.old_val = val
-             if run_option == "Arxiv":
-                 perform_ai_lookup(edited_input, vocal_summary=True, extended_refs=False,
-                                   titles_summary=True, full_audio=full_audio)
-             else:
-                 run_selected_model(run_option, edited_input)
-         else:
-             if st.button("Process Input"):
-                 st.session_state.old_val = val
-                 if run_option == "Arxiv":
-                     perform_ai_lookup(edited_input, vocal_summary=True, extended_refs=False,
-                                       titles_summary=True, full_audio=full_audio)
-                 else:
-                     run_selected_model(run_option, edited_input)

      if tab_main == "πŸ” Search ArXiv":
-         st.subheader("πŸ” Search ArXiv")
          q = st.text_input("Research query:")
-
-         st.markdown("### πŸŽ›οΈ Audio Generation Options")
-         vocal_summary = st.checkbox("πŸŽ™οΈ Vocal Summary (Short Answer)", value=True)
-         extended_refs = st.checkbox("πŸ“œ Extended References & Summaries (Long)", value=False)
-         titles_summary = st.checkbox("πŸ”– Paper Titles Only", value=True)
-         full_audio = st.checkbox("πŸ“š Generate Complete Audio Response", value=False,
-                                  help="Generate audio for the complete response including all papers and summaries")
-
-         if q and st.button("Run ArXiv Query"):
-             perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs,
-                               titles_summary=titles_summary, full_audio=full_audio)
-
-     elif tab_main == "🎀 Voice Input":
-         st.subheader("🎀 Voice Recognition")
-         user_text = st.text_area("Message:", height=100)
-         user_text = user_text.strip().replace('\n', ' ')
          if st.button("Send πŸ“¨"):
-             process_with_gpt(user_text)
-         st.subheader("πŸ“œ Chat History")
-         t1,t2=st.tabs(["Claude History","GPT-4o History"])
-         with t1:
-             for c in st.session_state.chat_history:
-                 st.write("**You:**", c["user"])
-                 st.write("**Claude:**", c["claude"])
-         with t2:
-             for m in st.session_state.messages:
-                 with st.chat_message(m["role"]):
-                     st.markdown(m["content"])
-
-     elif tab_main == "πŸ“Έ Media Gallery":
-         st.header("🎬 Media Gallery - Images and Videos")
-         tabs = st.tabs(["πŸ–ΌοΈ Images", "πŸŽ₯ Video"])
-         with tabs[0]:
-             imgs = glob.glob("*.png")+glob.glob("*.jpg")
-             if imgs:
-                 c = st.slider("Cols",1,5,3)
-                 cols = st.columns(c)
-                 for i,f in enumerate(imgs):
-                     with cols[i%c]:
-                         st.image(Image.open(f),use_container_width=True)
-                         if st.button(f"πŸ‘€ Analyze {os.path.basename(f)}", key=f"analyze_{f}"):
-                             a = process_image(f,"Describe this image.")
-                             st.markdown(a)
-             else:
-                 st.write("No images found.")
-         with tabs[1]:
-             vids = glob.glob("*.mp4")
-             if vids:
-                 for v in vids:
-                     with st.expander(f"πŸŽ₯ {os.path.basename(v)}"):
-                         st.video(v)
-                         if st.button(f"Analyze {os.path.basename(v)}", key=f"analyze_{v}"):
-                             a = process_video_with_gpt(v,"Describe video.")
-                             st.markdown(a)
              else:
-                 st.write("No videos found.")
-
-     elif tab_main == "πŸ“ File Editor":
-         if getattr(st.session_state,'current_file',None):
-             st.subheader(f"Editing: {st.session_state.current_file}")
-             new_text = st.text_area("Content:", st.session_state.file_content, height=300)
-             if st.button("Save"):
-                 with open(st.session_state.current_file,'w',encoding='utf-8') as f:
-                     f.write(new_text)
-                 st.success("Updated!")
-                 st.session_state.should_rerun = True
-         else:
-             st.write("Select a file from the sidebar to edit.")
-
-     groups, sorted_prefixes = load_files_for_sidebar()
-     display_file_manager_sidebar(groups, sorted_prefixes)
-
-     if st.session_state.viewing_prefix and st.session_state.viewing_prefix in groups:
-         st.write("---")
-         st.write(f"**Viewing Group:** {st.session_state.viewing_prefix}")
-         for f in groups[st.session_state.viewing_prefix]:
-             fname = os.path.basename(f)
-             ext = os.path.splitext(fname)[1].lower().strip('.')
-             st.write(f"### {fname}")
-             if ext == "md":
-                 content = open(f,'r',encoding='utf-8').read()
-                 st.markdown(content)
-             elif ext == "mp3":
-                 st.audio(f)
-             else:
-                 st.markdown(get_download_link(f), unsafe_allow_html=True)
-         if st.button("Close Group View"):
-             st.session_state.viewing_prefix = None
-
-     if st.session_state.should_rerun:
-         st.session_state.should_rerun = False
-         st.rerun()

- if __name__=="__main__":
      main()

  import streamlit as st
+ import anthropic, openai, base64, cv2, glob, os, re, time, zipfile
  from datetime import datetime
+ from collections import defaultdict
  from dotenv import load_dotenv
  from gradio_client import Client
  from PIL import Image
  from openai import OpenAI
+ import asyncio, edge_tts

+ # 🎯 1. Core Configuration & Setup
  st.set_page_config(
+     page_title="🚲BikeAI Research", page_icon="🚲", layout="wide",
+     menu_items={'About': "🚲BikeAI Research Assistant"}
  )
  load_dotenv()

+ # πŸ”‘ 2. API Setup
+ openai_api_key = st.secrets.get('OPENAI_API_KEY', os.getenv('OPENAI_API_KEY', ""))
+ anthropic_key = st.secrets.get('ANTHROPIC_API_KEY', os.getenv('ANTHROPIC_API_KEY_3', ""))
+ openai_client = OpenAI(api_key=openai_api_key, organization=os.getenv('OPENAI_ORG_ID'))
  claude_client = anthropic.Anthropic(api_key=anthropic_key)
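
The key lookup above replaces the old four-branch secrets/env logic with one expression per key: Streamlit secrets win, environment variables are the fallback. A minimal standalone sketch of that resolution order (the helper name `resolve_key` is illustrative, not part of the commit):

```python
import os

def resolve_key(secrets, secret_name: str, env_name: str) -> str:
    # Prefer the deployed secret; fall back to the local environment;
    # default to "" so client construction does not fail outright.
    return secrets.get(secret_name, os.getenv(env_name, ""))
```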

+ # πŸ“ 3. Session State
+ for key in ['transcript_history', 'chat_history', 'messages', 'viewing_prefix', 'should_rerun', 'old_val']:
+     if key not in st.session_state:
+         st.session_state[key] = [] if key in ['transcript_history', 'chat_history', 'messages'] else None
+
+ st.session_state.setdefault('openai_model', "gpt-4o-2024-05-13")
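
Expanded, the loop above is equivalent to the following defaults (note `should_rerun` now starts as `None` rather than the old `False`; both are falsy, so truthiness checks behave the same):

```python
import streamlit as st

# Illustrative expansion only; the commit's loop produces the same state.
defaults = {
    'transcript_history': [], 'chat_history': [], 'messages': [],
    'viewing_prefix': None, 'should_rerun': None, 'old_val': None,
}
for key, value in defaults.items():
    st.session_state.setdefault(key, value)
```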
+ # 🧠 4. Content Processing
+ def get_high_info_terms(text: str) -> list:
+     """Extract high-information terms from text"""
+     stop_words = set([
+         'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with',
+         'by', 'from', 'up', 'about', 'into', 'over', 'after', 'be', 'been', 'being', 'have',
+         'has', 'had', 'do', 'does', 'did', 'will', 'would', 'should', 'could', 'this', 'that'
+     ])
+
+     key_phrases = [
+         'artificial intelligence', 'machine learning', 'deep learning', 'neural network',
+         'natural language', 'computer vision', 'data science', 'reinforcement learning',
+         'large language model', 'transformer model', 'quantum computing', 'arxiv search'
+     ]
+
+     preserved_phrases = [phrase for phrase in key_phrases if phrase in text.lower()]
+     text = text.lower()
+     for phrase in preserved_phrases:
+         text = text.replace(phrase, '')
+
+     words = [word.lower() for word in re.findall(r'\b\w+(?:-\w+)*\b', text)
+              if len(word) > 3 and word.lower() not in stop_words
+              and not word.isdigit() and any(c.isalpha() for c in word)]
+
+     unique_terms = list(dict.fromkeys(preserved_phrases + words))
+     return unique_terms[:5]
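
To get a feel for the function, a hypothetical call (exact output depends on the stop-word and phrase lists above; preserved phrases sort first, then remaining long words, de-duplicated and capped at five):

```python
terms = get_high_info_terms(
    "A deep learning approach to arxiv search using large language models"
)
print(terms)
# e.g. ['deep learning', 'large language model', 'arxiv search',
#       'approach', 'using']
```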
+ # πŸ“ 5. File Operations
  def generate_filename(content, file_type="md"):
+     """Generate filename with high-information terms"""
      prefix = datetime.now().strftime("%y%m_%H%M") + "_"
+     info_terms = get_high_info_terms(content)
+     name_text = '_'.join(term.replace(' ', '-') for term in info_terms) if info_terms else 'file'
+     return f"{prefix}{name_text[:100]}.{file_type}"

  def create_file(prompt, response, file_type="md"):
+     """Create a new file with generated filename"""
+     filename = generate_filename(response.strip() or prompt.strip(), file_type)
      with open(filename, 'w', encoding='utf-8') as f:
+         f.write(f"{prompt}\n\n{response}")
      return filename
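
Filenames now embed the extracted terms rather than the first three words of the content; a hypothetical example (timestamp varies):

```python
fname = generate_filename("Quantum computing speeds up machine learning")
print(fname)
# e.g. '2412_0915_machine-learning_quantum-computing_speeds.md'
```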
+ def create_zip_of_files(md_files, mp3_files):
+     """Create zip with intelligent naming"""
+     all_files = [f for f in md_files if 'readme.md' not in f.lower()] + mp3_files
+     if not all_files:
+         return None
+
+     content = " ".join(open(f, 'r', encoding='utf-8').read() if f.endswith('.md')
+                        else os.path.basename(f) for f in all_files)
+
+     timestamp = datetime.now().strftime("%y%m_%H%M")
+     info_terms = get_high_info_terms(content)[:3]
+     zip_name = f"{timestamp}_{'_'.join(t.replace(' ', '-') for t in info_terms)}.zip"
+
+     with zipfile.ZipFile(zip_name, 'w') as z:
+         for f in all_files:
+             z.write(f)
+     return zip_name
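
Hypothetical usage, mirroring what the πŸ“ Files tab below does:

```python
import glob

# Archive every markdown note and mp3 in the working directory.
zip_path = create_zip_of_files(
    [f for f in glob.glob("*.md") if 'readme.md' not in f.lower()],
    glob.glob("*.mp3"),
)
```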
+ # πŸ”Š 6. Audio Processing
+ def clean_for_speech(text: str) -> str:
+     """Prepare text for speech synthesis"""
+     text = re.sub(r'\n|</s>|#|\(https?:\/\/[^\)]+\)|\s+', ' ', text)
+     return text.strip()
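
One behavioral note on the consolidated regex: every match is replaced by a single space in one left-to-right pass, so `#` now becomes a space (the removed version deleted it outright) and adjacent matches can leave runs of spaces, since replacement text is not re-scanned:

```python
import re

def clean_for_speech(text: str) -> str:
    # Same single-pass substitution as in the commit.
    text = re.sub(r'\n|</s>|#|\(https?:\/\/[^\)]+\)|\s+', ' ', text)
    return text.strip()

print(clean_for_speech("## Title (https://example.com)\nBody  text"))
# -> 'Title   Body text'  (a run of spaces survives between 'Title' and 'Body')
```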

  async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=0):
+     """Generate audio file using Edge TTS"""
      text = clean_for_speech(text)
      if not text.strip():
          return None
+     communicate = edge_tts.Communicate(text, voice, rate=f"{rate:+d}%", pitch=f"{pitch:+d}Hz")
+     out_fn = generate_filename(text, "mp3")
      await communicate.save(out_fn)
      return out_fn

  def speak_with_edge_tts(text, voice="en-US-AriaNeural", rate=0, pitch=0):
+     """Wrapper for edge TTS generation"""
      return asyncio.run(edge_tts_generate_audio(text, voice, rate, pitch))
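
Hypothetical usage; `rate` and `pitch` are signed integers formatted as `+N%` and `+NHz`. Note that `asyncio.run` starts a fresh event loop, so `speak_with_edge_tts` raises `RuntimeError` if called from code already running inside an event loop:

```python
# A slightly faster reading at default pitch.
mp3_path = speak_with_edge_tts("Hello from BikeAI", voice="en-US-AriaNeural", rate=10)
if mp3_path:
    play_and_download_audio(mp3_path)
```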

+ # 🎬 7. Media Processing
+ def process_image(image_path, user_prompt):
+     """Process image with GPT-4V"""
+     with open(image_path, "rb") as imgf:
+         b64img = base64.b64encode(imgf.read()).decode("utf-8")
+     resp = openai_client.chat.completions.create(
+         model=st.session_state["openai_model"],
+         messages=[
+             {"role": "system", "content": "You are a helpful assistant."},
+             {"role": "user", "content": [
+                 {"type": "text", "text": user_prompt},
+                 {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64img}"}}
+             ]}
+         ],
+         temperature=0.0,
+     )
+     return resp.choices[0].message.content
+ # πŸ€– 8. AI Integration
  def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True, full_audio=False):
+     """Perform Arxiv search and generate audio summaries"""
      start = time.time()
      client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+     refs = client.predict(q, 20, "Semantic Search", "mistralai/Mixtral-8x7B-Instruct-v0.1",
+                           api_name="/update_with_rag_md")[0]
+     answer = client.predict(q, "mistralai/Mixtral-8x7B-Instruct-v0.1", True, api_name="/ask_llm")
+     result = f"### πŸ”Ž {q}\n\n{answer}\n\n{refs}"

      st.markdown(result)

      if full_audio:
+         complete_text = f"Query: {q}. {clean_for_speech(answer)} {clean_for_speech(refs)}"
          st.write("### πŸ“š Complete Audio Response")
+         play_and_download_audio(speak_with_edge_tts(complete_text))

      if vocal_summary:
+         st.write("### πŸŽ™οΈ Vocal Summary")
+         play_and_download_audio(speak_with_edge_tts(clean_for_speech(answer)))

      if extended_refs:
+         st.write("### πŸ“œ Extended References")
+         play_and_download_audio(speak_with_edge_tts(
+             "Reference summaries: " + clean_for_speech(refs)))

      if titles_summary:
+         titles = [m.group(1) for m in re.finditer(r"\[([^\]]+)\]", refs)]
          if titles:
              st.write("### πŸ”– Paper Titles")
+             play_and_download_audio(speak_with_edge_tts(
+                 "Paper titles: " + ", ".join(titles)))

+     st.write(f"**Time:** {time.time()-start:.2f} s")
+     create_file(q, result)
      return result
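
For example, the πŸ” Search ArXiv tab below ends up calling it like this (query and flags are illustrative; they mirror the tab's checkbox defaults):

```python
perform_ai_lookup(
    "mixture of experts routing",  # hypothetical query
    vocal_summary=True,            # short spoken answer
    extended_refs=False,           # skip long reference audio
    titles_summary=True,           # read the paper titles
    full_audio=False,              # no full-response audio
)
```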
  def process_with_gpt(text):
+     """Process text with GPT-4"""
+     if not text:
+         return
      st.session_state.messages.append({"role":"user","content":text})
+     with st.chat_message("user"): st.markdown(text)
      with st.chat_message("assistant"):
+         resp = openai_client.chat.completions.create(
              model=st.session_state["openai_model"],
              messages=st.session_state.messages,
              stream=False
          )
+         ans = resp.choices[0].message.content
+         st.write(f"GPT-4o: {ans}")
+         create_file(text, ans)
      st.session_state.messages.append({"role":"assistant","content":ans})
      return ans

  def process_with_claude(text):
+     """Process text with Claude"""
+     if not text:
+         return
+     with st.chat_message("user"): st.markdown(text)
      with st.chat_message("assistant"):
+         resp = claude_client.messages.create(
              model="claude-3-sonnet-20240229",
              max_tokens=1000,
              messages=[{"role":"user","content":text}]
          )
+         ans = resp.content[0].text
+         st.write(f"Claude-3.5: {ans}")
+         create_file(text, ans)
      st.session_state.chat_history.append({"user":text,"claude":ans})
      return ans

+ def play_and_download_audio(file_path):
+     """Play and provide download link for audio file"""
+     if file_path and os.path.exists(file_path):
+         st.audio(file_path)
+         dl_link = f'<a href="data:audio/mpeg;base64,{base64.b64encode(open(file_path,"rb").read()).decode()}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}</a>'
+         st.markdown(dl_link, unsafe_allow_html=True)

+ # 🎯 9. Main Application
  def main():
+     st.sidebar.title("🚲BikeAI Research")
+     tab_main = st.radio("Action:", ["πŸ” Search ArXiv", "🎀 Voice", "πŸ“Έ Media", "πŸ“ Files"], horizontal=True)

      if tab_main == "πŸ” Search ArXiv":
          q = st.text_input("Research query:")
+         st.markdown("### πŸŽ›οΈ Audio Options")
+         cols = st.columns(4)
+         options = {
+             'vocal_summary': cols[0].checkbox("πŸŽ™οΈ Summary", value=True),
+             'extended_refs': cols[1].checkbox("πŸ“œ References", value=False),
+             'titles_summary': cols[2].checkbox("πŸ”– Titles", value=True),
+             'full_audio': cols[3].checkbox("πŸ“š Full Response", value=False,
+                                            help="Generate complete audio including papers")
+         }
+         if q and st.button("Search"):
+             perform_ai_lookup(q, **options)
+
+     elif tab_main == "🎀 Voice":
+         user_text = st.text_area("Message:", height=100).strip()
+         model = st.selectbox("Model:", ["GPT-4o", "Claude-3.5"])
          if st.button("Send πŸ“¨"):
+             if model == "GPT-4o":
+                 process_with_gpt(user_text)
              else:
+                 process_with_claude(user_text)

+     elif tab_main == "πŸ“Έ Media":
+         tabs = st.tabs(["πŸ–ΌοΈ Images", "πŸŽ₯ Video"])
+         with tabs[0]:
+             for f in glob.glob("*.png") + glob.glob("*.jpg"):
+                 st.image(Image.open(f))
+                 if st.button(f"Analyze {os.path.basename(f)}", key=f"analyze_{f}"):
+                     st.markdown(process_image(f, "Describe this image."))
+
+     elif tab_main == "πŸ“ Files":
+         md_files = [f for f in glob.glob("*.md") if 'readme.md' not in f.lower()]
+         mp3_files = glob.glob("*.mp3")
+         if st.button("Create Archive"):
+             zip_file = create_zip_of_files(md_files, mp3_files)
+             if zip_file:
+                 with open(zip_file, "rb") as f:
+                     st.download_button("Download Archive", f, zip_file)
+
+ if __name__ == "__main__":
      main()