awacke1 committed
Commit 4da4d3b • Parent: 871a88e

Update backup19-processInputButton-app.py

Files changed (1)
  1. backup19-processInputButton-app.py +36 -78
backup19-processInputButton-app.py CHANGED
@@ -67,7 +67,6 @@ if 'viewing_prefix' not in st.session_state:
  st.session_state['viewing_prefix'] = None
  if 'should_rerun' not in st.session_state:
  st.session_state['should_rerun'] = False
- # CHANGED LINE: Track old input from mycomponent to avoid recreating files on rerun
  if 'old_val' not in st.session_state:
  st.session_state['old_val'] = None
@@ -91,7 +90,7 @@ def clean_for_speech(text: str) -> str:
  text = text.replace("\n", " ")
  text = text.replace("</s>", " ")
  text = text.replace("#", "")
- # Remove links like (https://...)
+ # Remove links
  text = re.sub(r"\(https?:\/\/[^\)]+\)", "", text)
  text = re.sub(r"\s+", " ", text).strip()
  return text
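For reference, a quick standalone check of the cleaning steps visible in this hunk (only the lines shown above are reproduced; the full function in the file may do more, and the sample string is illustrative):

```python
import re

def clean_for_speech(text: str) -> str:
    # Reproduces only the steps visible in the hunk above.
    text = text.replace("\n", " ")
    text = text.replace("</s>", " ")
    text = text.replace("#", "")
    # Remove links
    text = re.sub(r"\(https?:\/\/[^\)]+\)", "", text)
    text = re.sub(r"\s+", " ", text).strip()
    return text

print(clean_for_speech("## Results\nSee the paper (https://example.com) for details.</s>"))
# -> Results See the paper for details.
```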
@@ -143,7 +142,7 @@ def speak_with_edge_tts(text, voice="en-US-AriaNeural", rate=0, pitch=0):
  def play_and_download_audio(file_path):
  if file_path and os.path.exists(file_path):
  st.audio(file_path)
- dl_link = f'<a href="data:audio/mpeg;base64,{base64.b64encode(open(file_path,"rb").read()).decode()}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}</a>'
+ dl_link = f'<a href="data:audio/mpeg;base64,{base64.b64encode(open(file_path,"rb").read()).decode()}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}"</a>'
  st.markdown(dl_link, unsafe_allow_html=True)

  def process_image(image_path, user_prompt):
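The `dl_link` line above inlines a base64 data-URI anchor into one f-string. A hedged sketch of the same pattern factored into a helper (the function name and `mime` parameter are illustrative, not part of the file):

```python
import base64
import os
import streamlit as st

def render_download_link(file_path: str, mime: str = "audio/mpeg") -> None:
    # Same data-URI pattern as dl_link above; reads the file once and
    # embeds it so the browser can save it without a second request.
    with open(file_path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    name = os.path.basename(file_path)
    st.markdown(
        f'<a href="data:{mime};base64,{b64}" download="{name}">Download {name}</a>',
        unsafe_allow_html=True,
    )
```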
@@ -198,17 +197,6 @@ def process_video_with_gpt(video_path, prompt):
  )
  return resp.choices[0].message.content

- def search_arxiv(query):
- st.write("πŸ” Searching ArXiv...")
- client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
- r1 = client.predict(prompt=query, llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1", stream_outputs=True, api_name="/ask_llm")
- st.markdown("### Mistral-8x7B-Instruct-v0.1 Result")
- st.markdown(r1)
- r2 = client.predict(prompt=query, llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2", stream_outputs=True, api_name="/ask_llm")
- st.markdown("### Mistral-7B-Instruct-v0.2 Result")
- st.markdown(r2)
- return f"{r1}\n\n{r2}"
-
  def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True):
  start = time.time()
  client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
@@ -219,7 +207,6 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary

  st.markdown(result)

- # Clean for speech before TTS
  if vocal_summary:
  main_text = clean_for_speech(r2)
  audio_file_main = speak_with_edge_tts(main_text)
@@ -248,7 +235,6 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary

  elapsed = time.time()-start
  st.write(f"**Total Elapsed:** {elapsed:.2f} s")
- # CHANGED LINE: Only create a file after explicit user action is taken (already covered by button triggers)
  create_file(q, result, "md")
  return result
@@ -265,7 +251,6 @@ def process_with_gpt(text):
  )
  ans = c.choices[0].message.content
  st.write("GPT-4o: " + ans)
- # CHANGED LINE: create_file is safe here since it's triggered by a user action button
  create_file(text, ans, "md")
  st.session_state.messages.append({"role":"assistant","content":ans})
  return ans
@@ -281,14 +266,12 @@ def process_with_claude(text):
  messages=[{"role":"user","content":text}]
  )
  ans = r.content[0].text
- st.write("Claude: " + ans)
- # CHANGED LINE: create_file only after button press/user action
+ st.write("Claude-3.5: " + ans)
  create_file(text, ans, "md")
  st.session_state.chat_history.append({"user":text,"claude":ans})
  return ans

  def create_zip_of_files(md_files, mp3_files):
- # Exclude README.md
  md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
  all_files = md_files + mp3_files
  if not all_files:
@@ -307,7 +290,6 @@ def load_files_for_sidebar():
  md_files = glob.glob("*.md")
  mp3_files = glob.glob("*.mp3")

- # Exclude README.md
  md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']

  all_files = md_files + mp3_files
@@ -315,14 +297,13 @@
  groups = defaultdict(list)
  for f in all_files:
  fname = os.path.basename(f)
- prefix = fname[:10] # first 10 chars as group prefix
+ prefix = fname[:10]
  groups[prefix].append(f)

  for prefix in groups:
  groups[prefix].sort(key=lambda x: os.path.getmtime(x), reverse=True)

  sorted_prefixes = sorted(groups.keys(), key=lambda pre: max(os.path.getmtime(x) for x in groups[pre]), reverse=True)
-
  return groups, sorted_prefixes

  def extract_keywords_from_md(files):
@@ -382,7 +363,6 @@ def display_file_manager_sidebar(groups, sorted_prefixes):
  if st.button("πŸ—‘Del Group", key="del_group_"+prefix):
  for f in files:
  os.remove(f)
- # CHANGED LINE: Show success message after deletion
  st.success(f"Deleted all files in group {prefix} successfully!")
  st.session_state.should_rerun = True
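`st.session_state.should_rerun = True` only sets a flag; the code that consumes it is outside this diff. A typical (hypothetical) consumer, usually placed at the end of `main()`, might look like this:

```python
import streamlit as st

# Hypothetical flag consumer; the actual app may handle this differently.
if st.session_state.get("should_rerun", False):
    st.session_state.should_rerun = False   # clear first to avoid a rerun loop
    st.rerun()                              # st.experimental_rerun() on older Streamlit
```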
 
@@ -391,49 +371,45 @@ def display_file_manager_sidebar(groups, sorted_prefixes):
  ctime = datetime.fromtimestamp(os.path.getmtime(f)).strftime("%Y-%m-%d %H:%M:%S")
  st.write(f"**{fname}** - {ctime}")

+ def run_selected_model(option, user_input):
+ user_input = user_input.strip()
+ if option == "Arxiv":
+ st.subheader("Arxiv Only Results:")
+ perform_ai_lookup(user_input, vocal_summary=True, extended_refs=False, titles_summary=True)
+ elif option == "GPT-4o":
+ process_with_gpt(user_input)
+ elif option == "Claude-3.5":
+ process_with_claude(user_input)
+
  def main():
  st.sidebar.markdown("### 🚲BikeAIπŸ† Multi-Agent Research AI")
  tab_main = st.radio("Action:",["🎀 Voice Input","πŸ“Έ Media Gallery","πŸ” Search ArXiv","πŸ“ File Editor"],horizontal=True)

- model_choice = st.sidebar.radio("AI Model:", ["Arxiv","GPT-4o","Claude-3","GPT+Claude+Arxiv"], index=0)
+ # Removed the old model_choice radio
+ # Instead, we rely on the dropdown in the Process Input section.

  mycomponent = components.declare_component("mycomponent", path="mycomponent")
  val = mycomponent(my_input_value="Hello")

- # CHANGED BLOCK: Wrap processing val in a button to avoid re-creation on rerun
+ # Show input in a text box for editing if detected
  if val:
- st.write("**Input Detected:**", val)
- if st.button("Process Input"):
- # Only process if val is new
- if val != st.session_state.old_val:
+ val_stripped = val.replace('\n', ' ')
+ edited_input = st.text_area("Edit your detected input:", value=val_stripped, height=100)
+ run_option = st.selectbox("Select AI Model:", ["Arxiv", "GPT-4o", "Claude-3.5"])
+ autorun = st.checkbox("AutoRun on input change", value=False)
+
+ input_changed = (val != st.session_state.old_val)
+
+ if autorun and input_changed:
+ # Automatically run the selected model if input changed
+ st.session_state.old_val = val
+ run_selected_model(run_option, edited_input)
+ else:
+ # If not autorun, show a button to run manually
+ if st.button("Process Input"):
  st.session_state.old_val = val
- if model_choice == "GPT-4o":
- process_with_gpt(val)
- elif model_choice == "Claude-3":
- process_with_claude(val)
- elif model_choice == "Arxiv":
- st.subheader("Arxiv Only Results:")
- perform_ai_lookup(val, vocal_summary=True, extended_refs=False, titles_summary=True)
- else:
- col1,col2,col3=st.columns(3)
- with col1:
- st.subheader("GPT-4o Omni:")
- try:
- process_with_gpt(val)
- except:
- st.write('GPT 4o error')
- with col2:
- st.subheader("Claude-3 Sonnet:")
- try:
- process_with_claude(val)
- except:
- st.write('Claude error')
- with col3:
- st.subheader("Arxiv + Mistral:")
- try:
- perform_ai_lookup(val, vocal_summary=True, extended_refs=False, titles_summary=True)
- except:
- st.write("Arxiv error")
+ run_selected_model(run_option, edited_input)
+

  if tab_main == "πŸ” Search ArXiv":
  st.subheader("πŸ” Search ArXiv")
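This hunk is the heart of the commit: input from the custom component is processed only when it differs from `old_val` (or when the user explicitly clicks the button), so Streamlit reruns no longer recreate files. A minimal standalone sketch of that pattern, with `st.text_input` standing in for `mycomponent` and a placeholder dispatch instead of the real model calls:

```python
import streamlit as st

if "old_val" not in st.session_state:
    st.session_state.old_val = None

val = st.text_input("Detected input:")  # stand-in for the custom mycomponent
run_option = st.selectbox("Select AI Model:", ["Arxiv", "GPT-4o", "Claude-3.5"])
autorun = st.checkbox("AutoRun on input change", value=False)

def run_selected_model(option: str, user_input: str) -> None:
    # Placeholder; the real app dispatches to perform_ai_lookup,
    # process_with_gpt, or process_with_claude here.
    st.write(f"[{option}] {user_input}")

input_changed = bool(val) and val != st.session_state.old_val

if autorun and input_changed:
    st.session_state.old_val = val          # remember what was processed
    run_selected_model(run_option, val.strip())
elif st.button("Process Input"):
    st.session_state.old_val = val
    run_selected_model(run_option, val.strip())
```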
@@ -450,28 +426,10 @@ def main():
  elif tab_main == "🎀 Voice Input":
  st.subheader("🎀 Voice Recognition")
  user_text = st.text_area("Message:", height=100)
- user_text = user_text.strip()
+ user_text = user_text.strip().replace('\n', ' ')
  if st.button("Send πŸ“¨"):
- if user_text:
- if model_choice == "GPT-4o":
- process_with_gpt(user_text)
- elif model_choice == "Claude-3":
- process_with_claude(user_text)
- elif model_choice == "Arxiv":
- st.subheader("Arxiv Only Results:")
- perform_ai_lookup(user_text, vocal_summary=True, extended_refs=False, titles_summary=True)
- else:
- col1,col2,col3=st.columns(3)
- with col1:
- st.subheader("GPT-4o Omni:")
- process_with_gpt(user_text)
- with col2:
- st.subheader("Claude-3 Sonnet:")
- process_with_claude(user_text)
- with col3:
- st.subheader("Arxiv & Mistral:")
- res = perform_ai_lookup(user_text, vocal_summary=True, extended_refs=False, titles_summary=True)
- st.markdown(res)
+ # Default to GPT-4o here, or you could similarly provide options.
+ process_with_gpt(user_text)
  st.subheader("πŸ“œ Chat History")
  t1,t2=st.tabs(["Claude History","GPT-4o History"])
  with t1:
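The new Voice Input branch hard-codes GPT-4o; the in-code comment notes you "could similarly provide options." A hedged sketch of that variant (the function name, `key`, and the passed-in dispatcher are illustrative, not part of the file):

```python
import streamlit as st

def voice_input_tab(run_selected_model) -> None:
    # Hypothetical refactor: reuse the same model dropdown instead of
    # hard-coding GPT-4o in the Voice Input branch.
    st.subheader("🎀 Voice Recognition")
    user_text = st.text_area("Message:", height=100).strip().replace("\n", " ")
    option = st.selectbox("AI Model:", ["Arxiv", "GPT-4o", "Claude-3.5"], key="voice_model")
    if st.button("Send πŸ“¨") and user_text:
        run_selected_model(option, user_text)
```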
 