awacke1 committed
Commit fdc1acb • Parent: 5f1ea92

Update app.py

Files changed (1)
  1. app.py +189 -32
app.py CHANGED
@@ -19,8 +19,10 @@ import extra_streamlit_components as stx
19
  from streamlit.runtime.scriptrunner import get_script_run_ctx
20
  import asyncio
21
  import edge_tts
22
 
23
- # 🎯 1. Core Configuration & Setup
24
  st.set_page_config(
25
  page_title="🚲BikeAI🏆 Claude/GPT Research",
26
  page_icon="🚲🏆",
@@ -34,7 +36,7 @@ st.set_page_config(
34
  )
35
  load_dotenv()
36
 
37
- # πŸ”‘ 2. API Setup & Clients
38
  openai_api_key = os.getenv('OPENAI_API_KEY', "")
39
  anthropic_key = os.getenv('ANTHROPIC_API_KEY_3', "")
40
  if 'OPENAI_API_KEY' in st.secrets:
@@ -48,7 +50,7 @@ openai_client = OpenAI(api_key=openai.api_key, organization=os.getenv('OPENAI_OR
48
  HF_KEY = os.getenv('HF_KEY')
49
  API_URL = os.getenv('API_URL')
50
 
51
- # πŸ“ 3. Session State Management
52
  if 'transcript_history' not in st.session_state:
53
  st.session_state['transcript_history'] = []
54
  if 'chat_history' not in st.session_state:
@@ -72,7 +74,7 @@ if 'should_rerun' not in st.session_state:
72
  if 'old_val' not in st.session_state:
73
  st.session_state['old_val'] = None
74
 
75
- # 🎨 4. Custom CSS
76
  st.markdown("""
77
  <style>
78
  .main { background: linear-gradient(to right, #1a1a1a, #2d2d2d); color: #fff; }
@@ -88,7 +90,7 @@ FILE_EMOJIS = {
88
  "mp3": "🎵",
89
  }
90
 
91
- # 🧠 5. High-Information Content Extraction
92
  def get_high_info_terms(text: str) -> list:
93
  """Extract high-information terms from text, including key phrases"""
94
  stop_words = set([
@@ -141,9 +143,8 @@ def get_high_info_terms(text: str) -> list:
141
  max_terms = 5
142
  return unique_terms[:max_terms]
143
 
144
- # πŸ“ 6. File Operations
145
  def generate_filename(content, file_type="md"):
146
- """Generate filename with meaningful terms"""
147
  prefix = datetime.now().strftime("%y%m_%H%M") + "_"
148
  info_terms = get_high_info_terms(content)
149
  name_text = '_'.join(term.replace(' ', '-') for term in info_terms) if info_terms else 'file'
@@ -155,20 +156,7 @@ def generate_filename(content, file_type="md"):
155
  filename = f"{prefix}{name_text}.{file_type}"
156
  return filename
157
 
158
- def create_file(prompt, response, file_type="md"):
159
- """Create file with intelligent naming"""
160
- filename = generate_filename(response.strip() if response.strip() else prompt.strip(), file_type)
161
- with open(filename, 'w', encoding='utf-8') as f:
162
- f.write(prompt + "\n\n" + response)
163
- return filename
164
-
165
- def get_download_link(file):
166
- """Generate download link for file"""
167
- with open(file, "rb") as f:
168
- b64 = base64.b64encode(f.read()).decode()
169
- return f'<a href="data:file/zip;base64,{b64}" download="{os.path.basename(file)}">📂 Download {os.path.basename(file)}</a>'
170
-
171
- # πŸ”Š 7. Audio Processing
172
  def clean_for_speech(text: str) -> str:
173
  """Clean text for speech synthesis"""
174
  text = text.replace("\n", " ")
@@ -214,7 +202,7 @@ def play_and_download_audio(file_path):
214
  dl_link = f'<a href="data:audio/mpeg;base64,{base64.b64encode(open(file_path,"rb").read()).decode()}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}</a>'
215
  st.markdown(dl_link, unsafe_allow_html=True)
216
 
217
- # 🎬 8. Media Processing
218
  def process_image(image_path, user_prompt):
219
  """Process image with GPT-4V"""
220
  with open(image_path, "rb") as imgf:
@@ -271,7 +259,7 @@ def process_video_with_gpt(video_path, prompt):
271
  )
272
  return resp.choices[0].message.content
273
 
274
- # πŸ€– 9. AI Model Integration
275
  def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True, full_audio=False):
276
  """Perform Arxiv search and generate audio summaries"""
277
  start = time.time()
@@ -318,7 +306,114 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary
318
 
319
  elapsed = time.time()-start
320
  st.write(f"**Total Elapsed:** {elapsed:.2f} s")
321
- create_file(q, result, "md")
322
  return result
323
 
324
  def process_with_gpt(text):
@@ -335,7 +430,8 @@ def process_with_gpt(text):
335
  )
336
  ans = c.choices[0].message.content
337
  st.write("GPT-4o: " + ans)
338
- create_file(text, ans, "md")
339
  st.session_state.messages.append({"role":"assistant","content":ans})
340
  return ans
341
 
@@ -352,11 +448,12 @@ def process_with_claude(text):
352
  )
353
  ans = r.content[0].text
354
  st.write("Claude-3.5: " + ans)
355
- create_file(text, ans, "md")
356
  st.session_state.chat_history.append({"user":text,"claude":ans})
357
  return ans
358
 
359
- # πŸ“‚ 10. File Management
360
  def create_zip_of_files(md_files, mp3_files):
361
  """Create zip with intelligent naming"""
362
  md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
@@ -364,7 +461,6 @@ def create_zip_of_files(md_files, mp3_files):
364
  if not all_files:
365
  return None
366
 
367
- # Collect content for high-info term extraction
368
  all_content = []
369
  for f in all_files:
370
  if f.endswith('.md'):
@@ -445,7 +541,10 @@ def display_file_manager_sidebar(groups, sorted_prefixes):
445
  if st.button("⬇️ Zip All"):
446
  z = create_zip_of_files(all_md, all_mp3)
447
  if z:
448
- st.sidebar.markdown(get_download_link(z),unsafe_allow_html=True)
449
 
450
  for prefix in sorted_prefixes:
451
  files = groups[prefix]
@@ -468,7 +567,62 @@ def display_file_manager_sidebar(groups, sorted_prefixes):
468
  ctime = datetime.fromtimestamp(os.path.getmtime(f)).strftime("%Y-%m-%d %H:%M:%S")
469
  st.write(f"**{fname}** - {ctime}")
470
 
471
- # 🎯 11. Main Application
472
  def main():
473
  st.sidebar.markdown("### 🚲BikeAI🏆 Multi-Agent Research AI")
474
  tab_main = st.radio("Action:",["🎤 Voice Input","📸 Media Gallery","🔍 Search ArXiv","📝 File Editor"],horizontal=True)
@@ -600,7 +754,10 @@ def main():
600
  elif ext == "mp3":
601
  st.audio(f)
602
  else:
603
- st.markdown(get_download_link(f), unsafe_allow_html=True)
604
  if st.button("Close Group View"):
605
  st.session_state.viewing_prefix = None
606
 
@@ -609,4 +766,4 @@ def main():
609
  st.rerun()
610
 
611
  if __name__=="__main__":
612
- main()
 
19
  from streamlit.runtime.scriptrunner import get_script_run_ctx
20
  import asyncio
21
  import edge_tts
22
+ import io
23
+ import sys
24
 
25
+ # 1. Core Configuration & Setup
26
  st.set_page_config(
27
  page_title="🚲BikeAI🏆 Claude/GPT Research",
28
  page_icon="🚲🏆",
 
36
  )
37
  load_dotenv()
38
 
39
+ # 2. API Setup & Clients
40
  openai_api_key = os.getenv('OPENAI_API_KEY', "")
41
  anthropic_key = os.getenv('ANTHROPIC_API_KEY_3', "")
42
  if 'OPENAI_API_KEY' in st.secrets:
 
50
  HF_KEY = os.getenv('HF_KEY')
51
  API_URL = os.getenv('API_URL')
52
 
53
+ # 3. Session State Management
54
  if 'transcript_history' not in st.session_state:
55
  st.session_state['transcript_history'] = []
56
  if 'chat_history' not in st.session_state:
 
74
  if 'old_val' not in st.session_state:
75
  st.session_state['old_val'] = None
76
 
77
+ # 4. Custom CSS
78
  st.markdown("""
79
  <style>
80
  .main { background: linear-gradient(to right, #1a1a1a, #2d2d2d); color: #fff; }
 
90
  "mp3": "🎵",
91
  }
92
 
93
+ # 5. High-Information Content Extraction
94
  def get_high_info_terms(text: str) -> list:
95
  """Extract high-information terms from text, including key phrases"""
96
  stop_words = set([
 
143
  max_terms = 5
144
  return unique_terms[:max_terms]
145
 
146
+ # 6. Filename Generation
147
  def generate_filename(content, file_type="md"):
 
148
  prefix = datetime.now().strftime("%y%m_%H%M") + "_"
149
  info_terms = get_high_info_terms(content)
150
  name_text = '_'.join(term.replace(' ', '-') for term in info_terms) if info_terms else 'file'
 
156
  filename = f"{prefix}{name_text}.{file_type}"
157
  return filename
158
 
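For readers skimming the diff, the naming convention that generate_filename applies can be summarized in a standalone, simplified sketch (the timestamp and terms below are purely illustrative, not output from the app):

```python
# Illustrative sketch of the naming scheme used by generate_filename above.
from datetime import datetime

def sketch_filename(terms, file_type="md"):
    prefix = datetime.now().strftime("%y%m_%H%M") + "_"   # e.g. "2412_0930_"
    name = '_'.join(t.replace(' ', '-') for t in terms) if terms else 'file'
    return f"{prefix}{name}.{file_type}"

print(sketch_filename(["edge tts", "arxiv search"]))
# -> something like "2412_0930_edge-tts_arxiv-search.md"
```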
159
+ # 7. Audio Processing
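The speak_with_edge_tts helper that the new code-interpreter path calls is not visible in this hunk; a minimal sketch of the edge_tts pattern such a helper typically wraps is shown below (the voice name and output path are assumptions, not values from the app):

```python
# Minimal sketch (not the app's actual helper): text-to-speech via edge-tts.
import asyncio
import edge_tts

async def tts_sketch(text: str, out_path: str = "speech.mp3",
                     voice: str = "en-US-AriaNeural") -> str:
    communicate = edge_tts.Communicate(text, voice)
    await communicate.save(out_path)   # writes an MP3 to out_path
    return out_path

# asyncio.run(tts_sketch("Hello from the research assistant"))
```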
160
  def clean_for_speech(text: str) -> str:
161
  """Clean text for speech synthesis"""
162
  text = text.replace("\n", " ")
 
202
  dl_link = f'<a href="data:audio/mpeg;base64,{base64.b64encode(open(file_path,"rb").read()).decode()}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}</a>'
203
  st.markdown(dl_link, unsafe_allow_html=True)
204
 
205
+ # 8. Media Processing
206
  def process_image(image_path, user_prompt):
207
  """Process image with GPT-4V"""
208
  with open(image_path, "rb") as imgf:
 
259
  )
260
  return resp.choices[0].message.content
261
 
262
+ # 9. AI Model Integration
263
  def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True, full_audio=False):
264
  """Perform Arxiv search and generate audio summaries"""
265
  start = time.time()
 
306
 
307
  elapsed = time.time()-start
308
  st.write(f"**Total Elapsed:** {elapsed:.2f} s")
309
+
310
+ # The new create_file signature requires an explicit filename,
311
+ # so build one from the result with generate_filename.
312
+ filename = generate_filename(result, "md")
313
+ create_file(filename, q, result, should_save=True)
314
+
315
+ # --- Code Interpreter Integration ---
316
+ # Parse out papers from refs if available
317
+ # Format assumed:
318
+ # [Title] Title of Paper
319
+ # Summary: ...
320
+ # Link: ...
321
+ # PDF: ...
322
+ # entries are separated by "[Title]"
323
+ papers_raw = refs.strip().split("[Title]")
324
+ papers = []
325
+ for p in papers_raw:
326
+ p = p.strip()
327
+ if not p:
328
+ continue
329
+ lines = p.split("\n")
330
+ title_line = lines[0].strip() if lines else ""
331
+ summary_line = ""
332
+ link_line = ""
333
+ pdf_line = ""
334
+ for line in lines[1:]:
335
+ line = line.strip()
336
+ if line.startswith("Summary:"):
337
+ summary_line = line.replace("Summary:", "").strip()
338
+ elif line.startswith("Link:"):
339
+ link_line = line.replace("Link:", "").strip()
340
+ elif line.startswith("PDF:"):
341
+ pdf_line = line.replace("PDF:", "").strip()
342
+
343
+ if title_line and summary_line:
344
+ papers.append({
345
+ "title": title_line,
346
+ "summary": summary_line,
347
+ "link": link_line,
348
+ "pdf": pdf_line
349
+ })
350
+
351
+ st.write("## Code Interpreter Options for Each Paper")
352
+ for i, paper in enumerate(papers):
353
+ st.write(f"**Paper {i+1}:** {paper['title']}")
354
+ st.write(f"**Summary:** {paper['summary']}")
355
+ if paper['link']:
356
+ st.write(f"[Arxiv Link]({paper['link']})")
357
+ if paper['pdf']:
358
+ st.write(f"[PDF]({paper['pdf']})")
359
+
360
+ code_interpreter = st.checkbox(f"Code Interpreter for '{paper['title']}'", key=f"ci_{i}")
361
+ if code_interpreter:
362
+ code_task = st.text_area(
363
+ f"Describe the Python/Streamlit functionality to implement based on this paper:",
364
+ height=100, key=f"code_task_{i}"
365
+ )
366
+ if st.button(f"Generate Code for Paper {i+1}", key=f"gen_code_{i}"):
367
+ if code_task.strip():
368
+ # Prompt the model to generate code
369
+ code_prompt = f"""
370
+ You are a coding assistant.
371
+ The user has a research paper titled: "{paper['title']}"
372
+ and summary: "{paper['summary']}".
373
+ The user wants the following functionality implemented in Python with Streamlit and possible HTML5 components:
374
+ "{code_task}"
375
+
376
+ Requirements:
377
+ - The code should be self-contained Python code, runnable within this Streamlit environment.
378
+ - It should use `streamlit` library for UI and `print()` for textual outputs.
379
+ - Provide only the Python code block, do not include extra explanations.
380
+ """
381
+
382
+ completion = openai_client.chat.completions.create(
383
+ model=st.session_state["openai_model"],
384
+ messages=[
385
+ {"role": "system", "content": "You are a helpful coding assistant."},
386
+ {"role": "user", "content": code_prompt}
387
+ ],
388
+ temperature=0.0
389
+ )
390
+ generated_code = completion.choices[0].message.content
391
+
392
+ st.write("### Generated Code")
393
+ st.code(generated_code, language="python")
394
+
395
+ # Execute the generated code
396
+ exec_locals = {}
397
+ original_stdout = sys.stdout
398
+ redirected_output = io.StringIO()
399
+ sys.stdout = redirected_output
400
+ try:
401
+ exec(generated_code, {}, exec_locals)
402
+ except Exception as e:
403
+ st.error(f"Error running generated code: {e}")
404
+ finally:
405
+ sys.stdout = original_stdout
406
+
407
+ code_output = redirected_output.getvalue()
408
+ st.write("### Code Output")
409
+ st.write(code_output)
410
+
411
+ # TTS on code output
412
+ if code_output.strip():
413
+ audio_file = speak_with_edge_tts(code_output)
414
+ if audio_file:
415
+ play_and_download_audio(audio_file)
416
+
417
  return result
418
 
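The mechanism this commit leans on twice, here and again in the new create_file below, is capturing stdout while exec-ing model-generated code. Stripped of the Streamlit calls, the pattern looks like the sketch below (the sample snippet passed in is hypothetical):

```python
# Minimal sketch of the capture-and-exec pattern used in this commit.
import io
import sys

def run_and_capture(code: str) -> str:
    """exec() a code string and return whatever it printed to stdout."""
    buffer = io.StringIO()
    original_stdout = sys.stdout
    sys.stdout = buffer                  # redirect print() output
    try:
        exec(code, {}, {})               # empty globals/locals in this sketch
    except Exception as e:
        print(f"Error running generated code: {e}")
    finally:
        sys.stdout = original_stdout     # always restore stdout
    return buffer.getvalue()

print(run_and_capture("print(2 + 2)"))   # -> "4"
```

As in the committed code, exec runs the generated code in-process with no sandboxing, so restoring stdout in a finally block is the only guard here.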
419
  def process_with_gpt(text):
 
430
  )
431
  ans = c.choices[0].message.content
432
  st.write("GPT-4o: " + ans)
433
+ filename = generate_filename(ans.strip() if ans.strip() else text.strip(), "md")
434
+ create_file(filename, text, ans, should_save=True)
435
  st.session_state.messages.append({"role":"assistant","content":ans})
436
  return ans
437
 
 
448
  )
449
  ans = r.content[0].text
450
  st.write("Claude-3.5: " + ans)
451
+ filename = generate_filename(ans.strip() if ans.strip() else text.strip(), "md")
452
+ create_file(filename, text, ans, should_save=True)
453
  st.session_state.chat_history.append({"user":text,"claude":ans})
454
  return ans
455
 
456
+ # 10. File Management
457
  def create_zip_of_files(md_files, mp3_files):
458
  """Create zip with intelligent naming"""
459
  md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
 
461
  if not all_files:
462
  return None
463
 
 
464
  all_content = []
465
  for f in all_files:
466
  if f.endswith('.md'):
 
541
  if st.button("⬇️ Zip All"):
542
  z = create_zip_of_files(all_md, all_mp3)
543
  if z:
544
+ with open(z, "rb") as f:
545
+ b64 = base64.b64encode(f.read()).decode()
546
+ dl_link = f'<a href="data:file/zip;base64,{b64}" download="{os.path.basename(z)}">📂 Download {os.path.basename(z)}</a>'
547
+ st.sidebar.markdown(dl_link,unsafe_allow_html=True)
548
 
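With get_download_link removed, the base64 data-URI pattern is now inlined both here and in the file gallery further down; a small shared helper along these lines (hypothetical, not part of the commit) would cover both call sites:

```python
# Sketch of a reusable data-URI download link (name and MIME handling are illustrative).
import base64
import os

def data_uri_download_link(path: str, mime: str = "application/octet-stream") -> str:
    with open(path, "rb") as fh:
        b64 = base64.b64encode(fh.read()).decode()
    name = os.path.basename(path)
    return f'<a href="data:{mime};base64,{b64}" download="{name}">Download {name}</a>'

# st.sidebar.markdown(data_uri_download_link(z, "application/zip"), unsafe_allow_html=True)
```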
549
  for prefix in sorted_prefixes:
550
  files = groups[prefix]
 
567
  ctime = datetime.fromtimestamp(os.path.getmtime(f)).strftime("%Y-%m-%d %H:%M:%S")
568
  st.write(f"**{fname}** - {ctime}")
569
 
570
+ # 11. New create_file function
571
+ context = {} # Execution context for code blocks
572
+
573
+ def create_file(filename, prompt, response, should_save=True):
574
+ if not should_save:
575
+ return
576
+
577
+ base_filename, ext = os.path.splitext(filename)
578
+ combined_content = ""
579
+
580
+ # Add Prompt with markdown title and emoji
581
+ combined_content += "# Prompt 📝\n" + prompt + "\n\n"
582
+
583
+ # Add Response with markdown title and emoji
584
+ combined_content += "# Response 💬\n" + response + "\n\n"
585
+
586
+ # Check for code blocks in the response
587
+ resources = re.findall(r"```([\s\S]*?)```", response)
588
+ for resource in resources:
589
+ # Check if the resource contains Python code
590
+ if "python" in resource.lower():
591
+ cleaned_code = re.sub(r'^\s*python', '', resource, flags=re.IGNORECASE | re.MULTILINE)
592
+
593
+ # Add Code Results title with markdown and emoji
594
+ combined_content += "# Code Results 🚀\n"
595
+
596
+ original_stdout = sys.stdout
597
+ sys.stdout = io.StringIO()
598
+
599
+ try:
600
+ exec(cleaned_code, context)
601
+ code_output = sys.stdout.getvalue()
602
+ combined_content += f"```\n{code_output}\n```\n\n"
603
+ realtimeEvalResponse = "# Code Results 🚀\n" + "```" + code_output + "```\n\n"
604
+ st.code(realtimeEvalResponse)
605
+ except Exception as e:
606
+ combined_content += f"```python\nError executing Python code: {e}\n```\n\n"
607
+
608
+ sys.stdout = original_stdout
609
+ else:
610
+ # Add non-Python resources with markdown and emoji
611
+ combined_content += "# Resource 🛠️\n" + "```" + resource + "```\n\n"
612
+
613
+ # Save the combined content to a Markdown file
614
+ if should_save:
615
+ with open(f"{base_filename}.md", 'w') as file:
616
+ file.write(combined_content)
617
+ st.code(combined_content)
618
+
619
+ # Create a Base64 encoded link for the file
620
+ with open(f"{base_filename}.md", 'rb') as file:
621
+ encoded_file = base64.b64encode(file.read()).decode()
622
+ href = f'<a href="data:file/markdown;base64,{encoded_file}" download="{filename}">Download File 📄</a>'
623
+ st.markdown(href, unsafe_allow_html=True)
624
+
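The code-block handling in the new create_file hinges on the two regexes shown above; in isolation they behave as in the sketch below (the sample response string is made up for illustration):

```python
# Standalone illustration of the fenced-block extraction used by create_file.
import re

response = "Here you go:\n```python\nprint('hi')\n```\nand a note:\n```text\nplain resource\n```"

blocks = re.findall(r"```([\s\S]*?)```", response)   # captures the text between each pair of fences
for block in blocks:
    if "python" in block.lower():
        # strip the leading "python" language tag before the code would be exec-ed
        cleaned = re.sub(r'^\s*python', '', block, flags=re.IGNORECASE | re.MULTILINE)
        print("python block:", cleaned.strip())      # -> print('hi')
    else:
        print("other resource:", block.strip())      # -> plain resource
```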
625
+ # 12. Main Application
626
  def main():
627
  st.sidebar.markdown("### 🚲BikeAI🏆 Multi-Agent Research AI")
628
  tab_main = st.radio("Action:",["🎤 Voice Input","📸 Media Gallery","🔍 Search ArXiv","📝 File Editor"],horizontal=True)
 
754
  elif ext == "mp3":
755
  st.audio(f)
756
  else:
757
+ with open(f, "rb") as file:
758
+ b64 = base64.b64encode(file.read()).decode()
759
+ dl_link = f'<a href="data:file/{ext};base64,{b64}" download="{fname}">Download {fname}</a>'
760
+ st.markdown(dl_link, unsafe_allow_html=True)
761
  if st.button("Close Group View"):
762
  st.session_state.viewing_prefix = None
763
 
 
766
  st.rerun()
767
 
768
  if __name__=="__main__":
769
+ main()