Update app.py
app.py CHANGED
@@ -341,7 +341,7 @@ def process_video_with_gpt(video_input, user_prompt):
     return response.choices[0].message.content
 
 # ArXiv Search Functions
-def search_arxiv(query):
+def search_arxiv_old(query):
     """Search ArXiv papers using Hugging Face client."""
     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
     response = client.predict(
@@ -352,6 +352,92 @@ def search_arxiv(query):
     )
     return response
 
+def search_arxiv(query):
+    st.write("Performing AI Lookup...")
+    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+
+    result1 = client.predict(
+        prompt=query,
+        llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1",
+        stream_outputs=True,
+        api_name="/ask_llm"
+    )
+    st.markdown("### Mixtral-8x7B-Instruct-v0.1 Result")
+    st.markdown(result1)
+
+    result2 = client.predict(
+        prompt=query,
+        llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
+        stream_outputs=True,
+        api_name="/ask_llm"
+    )
+    st.markdown("### Mistral-7B-Instruct-v0.2 Result")
+    st.markdown(result2)
+
+    combined_result = f"{result1}\n\n{result2}"
+    return combined_result
+
+# Function to generate a filename based on prompt and time (because names matter)
+def generate_filename(prompt, file_type):
+    central = pytz.timezone('US/Central')
+    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
+    safe_prompt = re.sub(r'\W+', '_', prompt)[:90]
+    return f"{safe_date_time}_{safe_prompt}.{file_type}"
+
+# Function to create and save a file (and avoid the black hole of lost data)
+def create_file(filename, prompt, response):
+    with open(filename, 'w', encoding='utf-8') as file:
+        file.write(prompt + "\n\n" + response)
+
+def perform_ai_lookup(query):
+    start_time = time.strftime("%Y-%m-%d %H:%M:%S")
+    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+    # Search 1 - Retrieve papers and references via RAG
+    response1 = client.predict(
+        query,
+        20,
+        "Semantic Search",
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        api_name="/update_with_rag_md"
+    )
+    Question = '### ' + query + '\r\n'  # Format for markdown display with links
+    References = response1[0]
+    ReferenceLinks = extract_urls(References)
+
+    RunSecondQuery = True
+    results = ''
+    if RunSecondQuery:
+        # Search 2 - Retrieve the summary with papers context and original query
+        response2 = client.predict(
+            query,
+            "mistralai/Mixtral-8x7B-Instruct-v0.1",
+            True,
+            api_name="/ask_llm"
+        )
+        if len(response2) > 10:
+            Answer = response2
+            SpeechSynthesis(Answer)
+            # Restructure results to follow format of Question, Answer, References, ReferenceLinks
+            results = Question + '\r\n' + Answer + '\r\n' + References + '\r\n' + ReferenceLinks
+            st.markdown(results)
+
+    st.write('Run of Multi-Agent System Paper Summary Spec is Complete')
+    end_time = time.strftime("%Y-%m-%d %H:%M:%S")
+    start_timestamp = time.mktime(time.strptime(start_time, "%Y-%m-%d %H:%M:%S"))
+    end_timestamp = time.mktime(time.strptime(end_time, "%Y-%m-%d %H:%M:%S"))
+    elapsed_seconds = end_timestamp - start_timestamp
+    st.write(f"Start time: {start_time}")
+    st.write(f"Finish time: {end_time}")
+    st.write(f"Elapsed time: {elapsed_seconds:.2f} seconds")
+
+    filename = generate_filename(query, "md")
+    create_file(filename, query, results)
+    return results
+
 # Chat Processing Functions
 def process_with_gpt(text_input):
     """Process text with GPT-4o."""
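Note that perform_ai_lookup leans on two helpers this commit does not touch: SpeechSynthesis and extract_urls, both presumably defined elsewhere in app.py. For reference, here is a minimal sketch of what extract_urls would need to look like for the string concatenation in perform_ai_lookup to work (it must return a string, not a list). This is an assumption, not the app's actual helper:

import re

# Hypothetical stand-in for the extract_urls helper referenced above.
# Assumption: the real helper lives elsewhere in app.py; this sketch just
# pulls http(s) links out of the references markdown and returns them as
# one newline-joined string so the later concatenation works.
def extract_urls(text):
    urls = re.findall(r'https?://[^\s\)]+', text)
    return '\n'.join(urls)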
@@ -549,7 +635,7 @@ def main():
     # Model Selection
     model_choice = st.sidebar.radio(
         "Choose AI Model:",
-        ["GPT-4o", "Claude-3", "
+        ["GPT-4o", "Claude-3", "GPT+Claude+Arxiv"]
     )
 
     # Chat Interface
@@ -578,7 +664,9 @@ def main():
         with col3:
             st.subheader("Arxiv and Mistral Research:")
             with st.spinner("Searching ArXiv..."):
-                results = search_arxiv(user_input)
+                #results = search_arxiv(user_input)
+                results = perform_ai_lookup(user_input)
+
                 st.markdown(results)
 
     # Display Chat History
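One possible refinement, not part of this commit: both search_arxiv and perform_ai_lookup construct a fresh Client on every call, which re-handshakes with the Space per query. Caching the client with Streamlit's st.cache_resource would reuse one connection. A minimal sketch, with get_arxiv_client being a name invented here rather than anything defined in app.py:

import streamlit as st
from gradio_client import Client

# Sketch only: build the gradio client once and reuse it across reruns
# instead of reconstructing it inside every lookup function.
@st.cache_resource
def get_arxiv_client():
    return Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")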
@@ -587,9 +675,9 @@ def main():
 
     with tab1:
         for chat in st.session_state.chat_history:
-            st.text_area("You:", chat["user"], height=100
-            st.text_area("Claude:", chat["claude"], height=200
-            st.markdown("
+            st.text_area("You:", chat["user"], height=100)
+            st.text_area("Claude:", chat["claude"], height=200)
+            st.markdown(chat["claude"])
 
     with tab2:
         for message in st.session_state.messages:
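To see what the new generate_filename scheme produces, here is a self-contained run of the same logic with the imports it needs; the timestamp in the sample output is illustrative, since it depends on the clock:

import re
from datetime import datetime
import pytz

def generate_filename(prompt, file_type):
    central = pytz.timezone('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
    safe_prompt = re.sub(r'\W+', '_', prompt)[:90]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"

print(generate_filename("What is RAG?", "md"))
# e.g. 1231_1447_What_is_RAG_.md  (month+day, hour+minute, sanitized prompt)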
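Finally, the two Space endpoints this commit exercises can be smoke-tested outside Streamlit with gradio_client alone, assuming the Space is running and the endpoint signatures are as shown in the diff. The calls below mirror the argument shapes used above (keyword style for /ask_llm as in search_arxiv, positional style for /update_with_rag_md as in perform_ai_lookup); the query string is just a placeholder:

from gradio_client import Client

client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")

# /ask_llm: direct LLM answer, keyword-style arguments as in search_arxiv.
answer = client.predict(
    prompt="What is retrieval augmented generation?",
    llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1",
    stream_outputs=True,
    api_name="/ask_llm"
)
print(answer)

# /update_with_rag_md: RAG search over ArXiv, positional arguments as in
# perform_ai_lookup; the first element of the result holds the references.
rag_result = client.predict(
    "What is retrieval augmented generation?",
    20,
    "Semantic Search",
    "mistralai/Mixtral-8x7B-Instruct-v0.1",
    api_name="/update_with_rag_md"
)
print(rag_result[0])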