Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,27 +1,20 @@
|
|
1 |
import streamlit as st
|
|
|
2 |
import os
|
3 |
import json
|
4 |
import random
|
5 |
-
|
6 |
-
|
7 |
-
# Imports
|
8 |
import base64
|
9 |
import glob
|
10 |
-
import json
|
11 |
import math
|
12 |
import openai
|
13 |
-
import os
|
14 |
import pytz
|
15 |
import re
|
16 |
import requests
|
17 |
-
import streamlit as st
|
18 |
import textract
|
19 |
import time
|
20 |
import zipfile
|
21 |
import huggingface_hub
|
22 |
import dotenv
|
23 |
-
import streamlit.components.v1 as components # Import Streamlit Components for HTML5
|
24 |
-
|
25 |
from audio_recorder_streamlit import audio_recorder
|
26 |
from bs4 import BeautifulSoup
|
27 |
from collections import deque
|
@@ -29,12 +22,6 @@ from datetime import datetime
|
|
29 |
from dotenv import load_dotenv
|
30 |
from huggingface_hub import InferenceClient
|
31 |
from io import BytesIO
|
32 |
-
from langchain.chat_models import ChatOpenAI
|
33 |
-
from langchain.chains import ConversationalRetrievalChain
|
34 |
-
from langchain.embeddings import OpenAIEmbeddings
|
35 |
-
from langchain.memory import ConversationBufferMemory
|
36 |
-
from langchain.text_splitter import CharacterTextSplitter
|
37 |
-
from langchain.vectorstores import FAISS
|
38 |
from openai import ChatCompletion
|
39 |
from PyPDF2 import PdfReader
|
40 |
from templates import bot_template, css, user_template
|
@@ -55,8 +42,56 @@ st.set_page_config(
|
|
55 |
}
|
56 |
)
|
57 |
|
58 |
-
PromptPrefix = 'Create a markdown outline and table with appropriate emojis for top ten graphic novel plotlines where you are defining the method steps of play for topic of '
|
59 |
-
PromptPrefix2 = 'Create a streamlit python app. Show full code listing. Create a UI implementing each feature creatively with python, streamlit, using variables and smart tables with word and idiom keys, creating reusable dense functions with graphic novel entity parameters, and data driven app with python libraries and streamlit components for Javascript and HTML5. Use appropriate emojis for labels to summarize and list parts, function, conditions for topic: '
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
60 |
|
61 |
st.markdown('''### ๐โจ๐ WordGameAI ''')
|
62 |
with st.expander("Help / About ๐", expanded=False):
|
@@ -156,6 +191,38 @@ roleplaying_glossary = {
|
|
156 |
}
|
157 |
|
158 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
159 |
# 9. Sidebar with UI controls to review and re-run prompts and continue responses
|
160 |
@st.cache_resource
|
161 |
def get_table_download_link(file_path):
|
@@ -186,7 +253,6 @@ def get_table_download_link(file_path):
|
|
186 |
|
187 |
|
188 |
|
189 |
-
|
190 |
def FileSidebar():
|
191 |
# ----------------------------------------------------- File Sidebar for Jump Gates ------------------------------------------
|
192 |
# Compose a file sidebar of markdown md files:
|
@@ -230,77 +296,35 @@ def FileSidebar():
|
|
230 |
if len(file_contents) > 0:
|
231 |
if next_action=='open':
|
232 |
file_content_area = st.text_area("File Contents:", file_contents, height=500)
|
|
|
|
|
|
|
|
|
|
|
|
|
233 |
if next_action=='md':
|
234 |
st.markdown(file_contents)
|
235 |
-
|
236 |
-
buttonlabel = '๐Run with Llama and GPT.'
|
237 |
if st.button(key='RunWithLlamaandGPT', label = buttonlabel):
|
238 |
user_prompt = file_contents
|
239 |
-
|
240 |
-
# Llama versus GPT Battle!
|
241 |
-
all=""
|
242 |
-
try:
|
243 |
-
st.write('๐Running with Llama.')
|
244 |
-
response = StreamLLMChatResponse(file_contents)
|
245 |
-
filename = generate_filename(user_prompt, "md")
|
246 |
-
create_file(filename, file_contents, response, should_save)
|
247 |
-
all=response
|
248 |
-
#SpeechSynthesis(response)
|
249 |
-
except:
|
250 |
-
st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
|
251 |
-
|
252 |
-
# gpt
|
253 |
try:
|
254 |
-
|
255 |
-
response2 = chat_with_model(user_prompt, file_contents, model_choice)
|
256 |
-
filename2 = generate_filename(file_contents, choice)
|
257 |
-
create_file(filename2, user_prompt, response, should_save)
|
258 |
-
all=all+response2
|
259 |
-
#SpeechSynthesis(response2)
|
260 |
except:
|
261 |
st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
|
262 |
-
|
263 |
-
SpeechSynthesis(all)
|
264 |
|
265 |
-
|
266 |
if next_action=='search':
|
267 |
file_content_area = st.text_area("File Contents:", file_contents, height=500)
|
268 |
-
st.write('๐Running with Llama and GPT.')
|
269 |
-
|
270 |
user_prompt = file_contents
|
271 |
-
|
272 |
-
# Llama versus GPT Battle!
|
273 |
-
all=""
|
274 |
-
try:
|
275 |
-
st.write('๐Running with Llama.')
|
276 |
-
response = StreamLLMChatResponse(file_contents)
|
277 |
-
filename = generate_filename(user_prompt, ".md")
|
278 |
-
create_file(filename, file_contents, response, should_save)
|
279 |
-
all=response
|
280 |
-
#SpeechSynthesis(response)
|
281 |
-
except:
|
282 |
-
st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
|
283 |
-
|
284 |
-
# gpt
|
285 |
try:
|
286 |
-
|
287 |
-
response2 = chat_with_model(user_prompt, file_contents, model_choice)
|
288 |
-
filename2 = generate_filename(file_contents, choice)
|
289 |
-
create_file(filename2, user_prompt, response, should_save)
|
290 |
-
all=all+response2
|
291 |
-
#SpeechSynthesis(response2)
|
292 |
except:
|
293 |
st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
|
294 |
-
|
295 |
-
SpeechSynthesis(all)
|
296 |
# ----------------------------------------------------- File Sidebar for Jump Gates ------------------------------------------
|
297 |
|
298 |
|
299 |
FileSidebar()
|
300 |
|
301 |
|
302 |
-
|
303 |
-
|
304 |
|
305 |
# ---- Art Card Sidebar with Random Selection of image:
|
306 |
def get_image_as_base64(url):
|
@@ -317,12 +341,9 @@ def create_download_link(filename, base64_str):
|
|
317 |
|
318 |
# List of image URLs
|
319 |
image_urls = [
|
320 |
-
#"https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/gikaT871Mm8k6wuv4pl_g.png",
|
321 |
"https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/gv1xmIiXh1NGTeeV-cYF2.png",
|
322 |
"https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/2YsnDyc_nDNW71PPKozdN.png",
|
323 |
"https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/G_GkRD_IT3f14K7gWlbwi.png",
|
324 |
-
#"https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/eGii5DvGIuCtWCU08_i-D.png",
|
325 |
-
#"https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/2-KfxcuXRcTFiHf4XlNsX.png"
|
326 |
]
|
327 |
|
328 |
# Select a random URL from the list
|
@@ -382,7 +403,7 @@ def load_score(key):
|
|
382 |
return score_data["score"]
|
383 |
return 0
|
384 |
|
385 |
-
|
386 |
def search_glossary(query):
|
387 |
for category, terms in roleplaying_glossary.items():
|
388 |
if query.lower() in (term.lower() for term in terms):
|
@@ -391,25 +412,23 @@ def search_glossary(query):
|
|
391 |
|
392 |
all=""
|
393 |
|
394 |
-
query2 = PromptPrefix + query
|
395 |
-
|
396 |
-
|
397 |
-
|
398 |
-
filename = generate_filename(query2 + ' --- ' + response, "md")
|
399 |
-
create_file(filename, query, response, should_save)
|
400 |
-
|
401 |
-
query3 = PromptPrefix2 + query + ' creating streamlit functions that implement outline of method steps below: ' + response # Add prompt preface for coding task behavior
|
402 |
-
# st.write('## ' + query3)
|
403 |
-
st.write('## ๐ Coding with GPT.') # -------------------------------------------------------------------------------------------------
|
404 |
response2 = chat_with_model(query3)
|
405 |
-
|
406 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
407 |
|
408 |
-
all = '# Query: ' + query + '# Response: ' + response + '# Response2: ' + response2
|
409 |
-
filename_txt2 = generate_filename(query + ' --- ' + all, "md")
|
410 |
-
create_file(filename_txt2, query, all, should_save)
|
411 |
SpeechSynthesis(all)
|
412 |
return all
|
|
|
413 |
|
414 |
# Function to display the glossary in a structured format
|
415 |
def display_glossary(glossary, area):
|
@@ -443,8 +462,74 @@ def display_glossary_grid(roleplaying_glossary):
|
|
443 |
st.markdown(f"{term} {links_md}", unsafe_allow_html=True)
|
444 |
|
445 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
446 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
447 |
|
|
|
448 |
game_emojis = {
|
449 |
"Dungeons and Dragons": "๐",
|
450 |
"Call of Cthulhu": "๐",
|
@@ -548,89 +633,9 @@ def get_all_query_params(key):
|
|
548 |
|
549 |
def clear_query_params():
|
550 |
st.query_params()
|
551 |
-
|
552 |
-
|
553 |
-
# Function to display content or image based on a query
|
554 |
-
def display_content_or_image(query):
|
555 |
-
# Check if the query matches any glossary term
|
556 |
-
for category, terms in transhuman_glossary.items():
|
557 |
-
for term in terms:
|
558 |
-
if query.lower() in term.lower():
|
559 |
-
st.subheader(f"Found in {category}:")
|
560 |
-
st.write(term)
|
561 |
-
return True # Return after finding and displaying the first match
|
562 |
-
|
563 |
-
# Check for an image match in a predefined directory (adjust path as needed)
|
564 |
-
image_dir = "images" # Example directory where images are stored
|
565 |
-
image_path = f"{image_dir}/{query}.png" # Construct image path with query
|
566 |
-
if os.path.exists(image_path):
|
567 |
-
st.image(image_path, caption=f"Image for {query}")
|
568 |
-
return True
|
569 |
-
|
570 |
-
# If no content or image is found
|
571 |
-
st.warning("No matching content or image found.")
|
572 |
-
return False
|
573 |
-
|
574 |
|
575 |
|
576 |
|
577 |
-
|
578 |
-
# ------------------------------------
|
579 |
-
|
580 |
-
def add_Med_Licensing_Exam_Dataset():
|
581 |
-
import streamlit as st
|
582 |
-
from datasets import load_dataset
|
583 |
-
dataset = load_dataset("augtoma/usmle_step_1")['test'] # Using 'test' split
|
584 |
-
st.title("USMLE Step 1 Dataset Viewer")
|
585 |
-
if len(dataset) == 0:
|
586 |
-
st.write("๐ข The dataset is empty.")
|
587 |
-
else:
|
588 |
-
st.write("""
|
589 |
-
๐ Use the search box to filter questions or use the grid to scroll through the dataset.
|
590 |
-
""")
|
591 |
-
|
592 |
-
# ๐ฉโ๐ฌ Search Box
|
593 |
-
search_term = st.text_input("Search for a specific question:", "")
|
594 |
-
|
595 |
-
# ๐ Pagination
|
596 |
-
records_per_page = 100
|
597 |
-
num_records = len(dataset)
|
598 |
-
num_pages = max(int(num_records / records_per_page), 1)
|
599 |
-
|
600 |
-
# Skip generating the slider if num_pages is 1 (i.e., all records fit in one page)
|
601 |
-
if num_pages > 1:
|
602 |
-
page_number = st.select_slider("Select page:", options=list(range(1, num_pages + 1)))
|
603 |
-
else:
|
604 |
-
page_number = 1 # Only one page
|
605 |
-
|
606 |
-
# ๐ Display Data
|
607 |
-
start_idx = (page_number - 1) * records_per_page
|
608 |
-
end_idx = start_idx + records_per_page
|
609 |
-
|
610 |
-
# ๐งช Apply the Search Filter
|
611 |
-
filtered_data = []
|
612 |
-
for record in dataset[start_idx:end_idx]:
|
613 |
-
if isinstance(record, dict) and 'text' in record and 'id' in record:
|
614 |
-
if search_term:
|
615 |
-
if search_term.lower() in record['text'].lower():
|
616 |
-
st.markdown(record)
|
617 |
-
filtered_data.append(record)
|
618 |
-
else:
|
619 |
-
filtered_data.append(record)
|
620 |
-
|
621 |
-
# ๐ Render the Grid
|
622 |
-
for record in filtered_data:
|
623 |
-
st.write(f"## Question ID: {record['id']}")
|
624 |
-
st.write(f"### Question:")
|
625 |
-
st.write(f"{record['text']}")
|
626 |
-
st.write(f"### Answer:")
|
627 |
-
st.write(f"{record['answer']}")
|
628 |
-
st.write("---")
|
629 |
-
|
630 |
-
st.write(f"๐ Total Records: {num_records} | ๐ Displaying {start_idx+1} to {min(end_idx, num_records)}")
|
631 |
-
|
632 |
-
# 1. Constants and Top Level UI Variables
|
633 |
-
|
634 |
# My Inference API Copy
|
635 |
API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud' # Dr Llama
|
636 |
# Meta's Original - Chat HF Free Version:
|
@@ -644,84 +649,10 @@ headers = {
|
|
644 |
"Content-Type": "application/json"
|
645 |
}
|
646 |
key = os.getenv('OPENAI_API_KEY')
|
647 |
-
prompt =
|
648 |
should_save = st.sidebar.checkbox("๐พ Save", value=True, help="Save your session data.")
|
649 |
|
650 |
-
# 2. Prompt label button demo for LLM
|
651 |
-
def add_witty_humor_buttons():
|
652 |
-
with st.expander("Wit and Humor ๐คฃ", expanded=True):
|
653 |
-
# Tip about the Dromedary family
|
654 |
-
st.markdown("๐ฌ **Fun Fact**: Dromedaries, part of the camel family, have a single hump and are adapted to arid environments. Their 'superpowers' include the ability to survive without water for up to 7 days, thanks to their specialized blood cells and water storage in their hump.")
|
655 |
-
|
656 |
-
# Define button descriptions
|
657 |
-
descriptions = {
|
658 |
-
"Generate Limericks ๐": "Write ten random adult limericks based on quotes that are tweet length and make you laugh ๐ญ",
|
659 |
-
"Wise Quotes ๐ง": "Generate ten wise quotes that are tweet length ๐ฆ",
|
660 |
-
"Funny Rhymes ๐ค": "Create ten funny rhymes that are tweet length ๐ถ",
|
661 |
-
"Medical Jokes ๐": "Create ten medical jokes that are tweet length ๐ฅ",
|
662 |
-
"Minnesota Humor โ๏ธ": "Create ten jokes about Minnesota that are tweet length ๐จ๏ธ",
|
663 |
-
"Top Funny Stories ๐": "Create ten funny stories that are tweet length ๐",
|
664 |
-
"More Funny Rhymes ๐๏ธ": "Create ten more funny rhymes that are tweet length ๐ต"
|
665 |
-
}
|
666 |
-
|
667 |
-
# Create columns
|
668 |
-
col1, col2, col3 = st.columns([1, 1, 1], gap="small")
|
669 |
-
|
670 |
-
# Add buttons to columns
|
671 |
-
if col1.button("Wise Limericks ๐"):
|
672 |
-
StreamLLMChatResponse(descriptions["Generate Limericks ๐"])
|
673 |
-
|
674 |
-
if col2.button("Wise Quotes ๐ง"):
|
675 |
-
StreamLLMChatResponse(descriptions["Wise Quotes ๐ง"])
|
676 |
-
|
677 |
-
#if col3.button("Funny Rhymes ๐ค"):
|
678 |
-
# StreamLLMChatResponse(descriptions["Funny Rhymes ๐ค"])
|
679 |
-
|
680 |
-
col4, col5, col6 = st.columns([1, 1, 1], gap="small")
|
681 |
-
|
682 |
-
if col4.button("Top Ten Funniest Clean Jokes ๐"):
|
683 |
-
StreamLLMChatResponse(descriptions["Top Ten Funniest Clean Jokes ๐"])
|
684 |
-
|
685 |
-
if col5.button("Minnesota Humor โ๏ธ"):
|
686 |
-
StreamLLMChatResponse(descriptions["Minnesota Humor โ๏ธ"])
|
687 |
-
|
688 |
-
if col6.button("Origins of Medical Science True Stories"):
|
689 |
-
StreamLLMChatResponse(descriptions["Origins of Medical Science True Stories"])
|
690 |
-
|
691 |
-
col7 = st.columns(1, gap="small")
|
692 |
-
|
693 |
-
if col7[0].button("Top Ten Best Write a streamlit python program prompts to build AI programs. ๐๏ธ"):
|
694 |
-
StreamLLMChatResponse(descriptions["Top Ten Best Write a streamlit python program prompts to build AI programs. ๐๏ธ"])
|
695 |
|
696 |
-
def SpeechSynthesis(result):
|
697 |
-
documentHTML5='''
|
698 |
-
<!DOCTYPE html>
|
699 |
-
<html>
|
700 |
-
<head>
|
701 |
-
<title>Read It Aloud</title>
|
702 |
-
<script type="text/javascript">
|
703 |
-
function readAloud() {
|
704 |
-
const text = document.getElementById("textArea").value;
|
705 |
-
const speech = new SpeechSynthesisUtterance(text);
|
706 |
-
window.speechSynthesis.speak(speech);
|
707 |
-
}
|
708 |
-
</script>
|
709 |
-
</head>
|
710 |
-
<body>
|
711 |
-
<h1>๐ Read It Aloud</h1>
|
712 |
-
<textarea id="textArea" rows="10" cols="80">
|
713 |
-
'''
|
714 |
-
documentHTML5 = documentHTML5 + result
|
715 |
-
documentHTML5 = documentHTML5 + '''
|
716 |
-
</textarea>
|
717 |
-
<br>
|
718 |
-
<button onclick="readAloud()">๐ Read Aloud</button>
|
719 |
-
</body>
|
720 |
-
</html>
|
721 |
-
'''
|
722 |
-
|
723 |
-
components.html(documentHTML5, width=1280, height=300)
|
724 |
-
#return result
|
725 |
|
726 |
|
727 |
# 3. Stream Llama Response
|
@@ -878,9 +809,11 @@ def read_file_content(file,max_length):
|
|
878 |
else:
|
879 |
return ""
|
880 |
|
|
|
881 |
# 11. Chat with GPT - Caution on quota - now favoring fastest AI pipeline STT Whisper->LLM Llama->TTS
|
882 |
@st.cache_resource
|
883 |
-
def chat_with_model(prompt, document_section='', model_choice='gpt-3.5-turbo'):
|
|
|
884 |
model = model_choice
|
885 |
conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
|
886 |
conversation.append({'role': 'user', 'content': prompt})
|
@@ -891,9 +824,8 @@ def chat_with_model(prompt, document_section='', model_choice='gpt-3.5-turbo'):
|
|
891 |
res_box = st.empty()
|
892 |
collected_chunks = []
|
893 |
collected_messages = []
|
894 |
-
|
895 |
-
|
896 |
-
for chunk in openai.ChatCompletion.create(model='gpt-3.5-turbo', messages=conversation, temperature=0.5, stream=True):
|
897 |
collected_chunks.append(chunk)
|
898 |
chunk_message = chunk['choices'][0]['delta']
|
899 |
collected_messages.append(chunk_message)
|
@@ -910,9 +842,9 @@ def chat_with_model(prompt, document_section='', model_choice='gpt-3.5-turbo'):
|
|
910 |
st.write(time.time() - start_time)
|
911 |
return full_reply_content
|
912 |
|
913 |
-
# 12. Embedding VectorDB for LLM query of documents to text to compress inputs and prompt together as Chat memory using Langchain
|
914 |
@st.cache_resource
|
915 |
-
def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'):
|
|
|
916 |
conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
|
917 |
conversation.append({'role': 'user', 'content': prompt})
|
918 |
if len(file_content)>0:
|
@@ -920,6 +852,7 @@ def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'):
|
|
920 |
response = openai.ChatCompletion.create(model=model_choice, messages=conversation)
|
921 |
return response['choices'][0]['message']['content']
|
922 |
|
|
|
923 |
def extract_mime_type(file):
|
924 |
if isinstance(file, str):
|
925 |
pattern = r"type='(.*?)'"
|
@@ -1022,25 +955,16 @@ def get_zip_download_link(zip_file):
|
|
1022 |
href = f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'
|
1023 |
return href
|
1024 |
|
1025 |
-
# 14. Inference Endpoints for Whisper (best fastest STT) on NVIDIA T4 and Llama (best fastest AGI LLM) on NVIDIA A10
|
1026 |
-
# My Inference Endpoint
|
1027 |
API_URL_IE = f'https://tonpixzfvq3791u9.us-east-1.aws.endpoints.huggingface.cloud'
|
1028 |
-
# Original
|
1029 |
API_URL_IE = "https://api-inference.huggingface.co/models/openai/whisper-small.en"
|
1030 |
MODEL2 = "openai/whisper-small.en"
|
1031 |
MODEL2_URL = "https://huggingface.co/openai/whisper-small.en"
|
1032 |
-
#headers = {
|
1033 |
-
# "Authorization": "Bearer XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
|
1034 |
-
# "Content-Type": "audio/wav"
|
1035 |
-
#}
|
1036 |
-
# HF_KEY = os.getenv('HF_KEY')
|
1037 |
HF_KEY = st.secrets['HF_KEY']
|
1038 |
headers = {
|
1039 |
"Authorization": f"Bearer {HF_KEY}",
|
1040 |
"Content-Type": "audio/wav"
|
1041 |
}
|
1042 |
|
1043 |
-
#@st.cache_resource
|
1044 |
def query(filename):
|
1045 |
with open(filename, "rb") as f:
|
1046 |
data = f.read()
|
@@ -1070,10 +994,6 @@ def transcribe_audio(filename):
|
|
1070 |
return output
|
1071 |
|
1072 |
def whisper_main():
|
1073 |
-
#st.title("Speech to Text")
|
1074 |
-
#st.write("Record your speech and get the text.")
|
1075 |
-
|
1076 |
-
# Audio, transcribe, GPT:
|
1077 |
filename = save_and_play_audio(audio_recorder)
|
1078 |
if filename is not None:
|
1079 |
transcription = transcribe_audio(filename)
|
@@ -1085,23 +1005,17 @@ def whisper_main():
|
|
1085 |
transcript=''
|
1086 |
st.write(transcript)
|
1087 |
|
1088 |
-
|
1089 |
-
# Whisper to GPT: New!! ---------------------------------------------------------------------
|
1090 |
-
st.write('Reasoning with your inputs with GPT..')
|
1091 |
response = chat_with_model(transcript)
|
1092 |
st.write('Response:')
|
1093 |
st.write(response)
|
1094 |
-
|
1095 |
filename = generate_filename(response, "txt")
|
1096 |
create_file(filename, transcript, response, should_save)
|
1097 |
-
# Whisper to GPT: New!! ---------------------------------------------------------------------
|
1098 |
-
|
1099 |
|
1100 |
# Whisper to Llama:
|
1101 |
response = StreamLLMChatResponse(transcript)
|
1102 |
filename_txt = generate_filename(transcript, "md")
|
1103 |
create_file(filename_txt, transcript, response, should_save)
|
1104 |
-
|
1105 |
filename_wav = filename_txt.replace('.txt', '.wav')
|
1106 |
import shutil
|
1107 |
try:
|
@@ -1109,14 +1023,9 @@ def whisper_main():
|
|
1109 |
shutil.copyfile(filename, filename_wav)
|
1110 |
except:
|
1111 |
st.write('.')
|
1112 |
-
|
1113 |
if os.path.exists(filename):
|
1114 |
os.remove(filename)
|
1115 |
|
1116 |
-
#st.experimental_rerun()
|
1117 |
-
#except:
|
1118 |
-
# st.write('Starting Whisper Model on GPU. Please retry in 30 seconds.')
|
1119 |
-
|
1120 |
|
1121 |
|
1122 |
# Sample function to demonstrate a response, replace with your own logic
|
@@ -1125,59 +1034,22 @@ def StreamMedChatResponse(topic):
|
|
1125 |
|
1126 |
|
1127 |
|
1128 |
-
def add_medical_exam_buttons():
|
1129 |
-
# Medical exam terminology descriptions
|
1130 |
-
descriptions = {
|
1131 |
-
"White Blood Cells ๐": "3 Q&A with emojis about types, facts, function, inputs and outputs of white blood cells ๐ฅ",
|
1132 |
-
"CT Imaging๐ฆ ": "3 Q&A with emojis on CT Imaging post surgery, how to, what to look for ๐",
|
1133 |
-
"Hematoma ๐": "3 Q&A with emojis about hematoma and infection care and study including bacteria cultures and tests or labs๐ช",
|
1134 |
-
"Post Surgery Wound Care ๐": "3 Q&A with emojis on wound care, and good bedside manner ๐ฉธ",
|
1135 |
-
"Healing and humor ๐": "3 Q&A with emojis on stories and humor about healing and caregiving ๐",
|
1136 |
-
"Psychology of bedside manner ๐งฌ": "3 Q&A with emojis on bedside manner and how to make patients feel at ease๐ ",
|
1137 |
-
"CT scan ๐": "3 Q&A with analysis on infection using CT scan and packing for skin, cellulitus and fascia ๐ฉบ"
|
1138 |
-
}
|
1139 |
-
|
1140 |
-
# Expander for medical topics
|
1141 |
-
with st.expander("Medical Licensing Exam Topics ๐", expanded=False):
|
1142 |
-
st.markdown("๐ฉบ **Important**: Variety of topics for medical licensing exams.")
|
1143 |
-
|
1144 |
-
# Create buttons for each description with unique keys
|
1145 |
-
for idx, (label, content) in enumerate(descriptions.items()):
|
1146 |
-
button_key = f"button_{idx}"
|
1147 |
-
if st.button(label, key=button_key):
|
1148 |
-
st.write(f"Running {label}")
|
1149 |
-
input='Create markdown outline for definition of topic ' + label + ' also short quiz with appropriate emojis and definitions for: ' + content
|
1150 |
-
response=StreamLLMChatResponse(input)
|
1151 |
-
filename = generate_filename(response, 'txt')
|
1152 |
-
create_file(filename, input, response, should_save)
|
1153 |
-
|
1154 |
-
|
1155 |
-
|
1156 |
-
|
1157 |
# 17. Main
|
1158 |
def main():
|
1159 |
-
prompt =
|
1160 |
-
# Add Wit and Humor buttons
|
1161 |
-
# add_witty_humor_buttons()
|
1162 |
-
# add_medical_exam_buttons()
|
1163 |
-
|
1164 |
with st.expander("Prompts ๐", expanded=False):
|
1165 |
-
example_input = st.text_input("Enter your prompt text
|
1166 |
-
if st.button("Run Prompt
|
1167 |
try:
|
1168 |
response=StreamLLMChatResponse(example_input)
|
1169 |
create_file(filename, example_input, response, should_save)
|
1170 |
except:
|
1171 |
-
st.write('
|
1172 |
-
|
1173 |
openai.api_key = os.getenv('OPENAI_API_KEY')
|
1174 |
if openai.api_key == None: openai.api_key = st.secrets['OPENAI_API_KEY']
|
1175 |
-
|
1176 |
menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
|
1177 |
choice = st.sidebar.selectbox("Output File Type:", menu)
|
1178 |
-
|
1179 |
model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
|
1180 |
-
|
1181 |
user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
|
1182 |
collength, colupload = st.columns([2,3]) # adjust the ratio as needed
|
1183 |
with collength:
|
@@ -1208,8 +1080,6 @@ def main():
|
|
1208 |
filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
|
1209 |
create_file(filename, user_prompt, response, should_save)
|
1210 |
st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
|
1211 |
-
|
1212 |
-
|
1213 |
if st.button('๐ฌ Chat'):
|
1214 |
st.write('Reasoning with your inputs...')
|
1215 |
user_prompt_sections = divide_prompt(user_prompt, max_length)
|
@@ -1262,18 +1132,14 @@ def main():
|
|
1262 |
|
1263 |
|
1264 |
|
1265 |
-
# Feedback
|
1266 |
-
# Step: Give User a Way to Upvote or Downvote
|
1267 |
GiveFeedback=False
|
1268 |
if GiveFeedback:
|
1269 |
with st.expander("Give your feedback ๐", expanded=False):
|
1270 |
-
|
1271 |
feedback = st.radio("Step 8: Give your feedback", ("๐ Upvote", "๐ Downvote"))
|
1272 |
if feedback == "๐ Upvote":
|
1273 |
st.write("You upvoted ๐. Thank you for your feedback!")
|
1274 |
else:
|
1275 |
st.write("You downvoted ๐. Thank you for your feedback!")
|
1276 |
-
|
1277 |
load_dotenv()
|
1278 |
st.write(css, unsafe_allow_html=True)
|
1279 |
st.header("Chat with documents :books:")
|
@@ -1294,31 +1160,22 @@ def main():
|
|
1294 |
filename = generate_filename(raw, 'txt')
|
1295 |
create_file(filename, raw, '', should_save)
|
1296 |
|
1297 |
-
|
1298 |
-
# Display instructions and handle query parameters
|
1299 |
-
#st.markdown("## Glossary Lookup\nEnter a term in the URL query, like `?q=Nanotechnology` or `?query=Martian Syndicate`.")
|
1300 |
-
|
1301 |
try:
|
1302 |
query_params = st.query_params
|
1303 |
-
#query = (query_params.get('q') or query_params.get('query') or [''])[0]
|
1304 |
query = (query_params.get('q') or query_params.get('query') or [''])
|
1305 |
-
st.markdown('# Running query: ' + query)
|
1306 |
if query: search_glossary(query)
|
1307 |
except:
|
1308 |
st.markdown(' ')
|
1309 |
|
1310 |
# Display the glossary grid
|
1311 |
st.markdown("### ๐ฒ๐บ๏ธ Word Game Gallery")
|
1312 |
-
|
1313 |
-
|
1314 |
-
|
1315 |
-
|
1316 |
-
|
1317 |
-
# Assuming the transhuman_glossary and other setup code remains the same
|
1318 |
-
#st.write("Current Query Parameters:", st.query_params)
|
1319 |
-
#st.markdown("### Query Parameters - These Deep Link Map to Remixable Methods, Navigate or Trigger Functionalities")
|
1320 |
|
1321 |
-
# Example: Using query parameters to navigate or trigger functionalities
|
1322 |
if 'action' in st.query_params:
|
1323 |
action = st.query_params()['action'][0] # Get the first (or only) 'action' parameter
|
1324 |
if action == 'show_message':
|
|
|
1 |
import streamlit as st
|
2 |
+
import streamlit.components.v1 as components
|
3 |
import os
|
4 |
import json
|
5 |
import random
|
|
|
|
|
|
|
6 |
import base64
|
7 |
import glob
|
|
|
8 |
import math
|
9 |
import openai
|
|
|
10 |
import pytz
|
11 |
import re
|
12 |
import requests
|
|
|
13 |
import textract
|
14 |
import time
|
15 |
import zipfile
|
16 |
import huggingface_hub
|
17 |
import dotenv
|
|
|
|
|
18 |
from audio_recorder_streamlit import audio_recorder
|
19 |
from bs4 import BeautifulSoup
|
20 |
from collections import deque
|
|
|
22 |
from dotenv import load_dotenv
|
23 |
from huggingface_hub import InferenceClient
|
24 |
from io import BytesIO
|
|
|
|
|
|
|
|
|
|
|
|
|
25 |
from openai import ChatCompletion
|
26 |
from PyPDF2 import PdfReader
|
27 |
from templates import bot_template, css, user_template
|
|
|
42 |
}
|
43 |
)
|
44 |
|
45 |
+
#PromptPrefix = 'Create a markdown outline and table with appropriate emojis for top ten graphic novel plotlines where you are defining the method steps of play for topic of '
|
46 |
+
#PromptPrefix2 = 'Create a streamlit python app. Show full code listing. Create a UI implementing each feature creatively with python, streamlit, using variables and smart tables with word and idiom keys, creating reusable dense functions with graphic novel entity parameters, and data driven app with python libraries and streamlit components for Javascript and HTML5. Use appropriate emojis for labels to summarize and list parts, function, conditions for topic: '
|
47 |
+
|
48 |
+
# Prompts for App, for App Product, and App Product Code
|
49 |
+
PromptPrefix = 'Create a word game rule set and background story with streamlit markdown outlines and tables with appropriate emojis for methodical step by step rules defining the game play rules. Use story structure architect rules to plan, structure and write three dramatic situations to include in the word game rules matching the theme for topic of '
|
50 |
+
PromptPrefix2 = 'Create a streamlit python user app with full code listing to create a UI implementing the plans, structure, situations and tables as python functions creating a word game with parts of speech and humorous word play which operates like word game rules and creates a compelling fun story using streamlit to create user interface elements like emoji buttons, sliders, drop downs, and data interfaces like dataframes to show tables, session_state to track inventory, character advancement and experience, locations, file_uploader to allow the user to add images which are saved and referenced shown in gallery, camera_input to take character picture, on_change = function callbacks with continual running plots that change when you change data or click a button, randomness and word and letter rolls using emojis and st.markdown, st.expander for groupings and clusters of things, st.columns and other UI controls in streamlit as a game. Create inline data tables and list dictionaries for entities implemented as variables for the word game rule entities and stats. Design it as a fun data driven game app and show full python code listing for this ruleset and thematic story plot line: '
|
51 |
+
PromptPrefix3 = 'Create a HTML5 aframe and javascript app using appropriate libraries to create a word game simulation with advanced libraries like aframe to render 3d scenes creating moving entities that stay within a bounding box but show text and animation in 3d for inventory, components and story entities. Show full code listing. Add a list of new random entities say 3 of a few different types to any list appropriately and use emojis to make things easier and fun to read. Use appropriate emojis in labels. Create the UI to implement storytelling in the style of a dungeon master, with features using three emoji appropriate text plot twists and recurring interesting funny fascinating and complex almost poetic named characters with genius traits and file IO, randomness, ten point choice lists, math distribution tradeoffs, witty humorous dilemnas with emoji , rewards, variables, reusable functions with parameters, and data driven app with python libraries and streamlit components for Javascript and HTML5. Use appropriate emojis for labels to summarize and list parts, function, conditions for topic:'
|
52 |
+
|
53 |
+
|
54 |
+
# Function to display the entire glossary in a grid format with links
def display_glossary_grid(roleplaying_glossary):
    """Render the glossary as a grid: one section per category, one column per game,
    and each term followed by a row of emoji search links.

    FIX(review): the emoji keys below had been mojibake-corrupted into identical
    strings, which made later dict entries silently overwrite earlier ones and
    drop links. Restored distinct emoji labels — confirm they match the
    intended originals.
    """
    search_urls = {
        "📖": lambda k: f"https://en.wikipedia.org/wiki/{quote(k)}",                      # Wikipedia article
        "🔍": lambda k: f"https://www.google.com/search?q={quote(k)}",                    # Google search
        "▶️": lambda k: f"https://www.youtube.com/results?search_query={quote(k)}",       # YouTube search
        "🔎": lambda k: f"https://www.bing.com/search?q={quote(k)}",                      # Bing search
        "🐦": lambda k: f"https://twitter.com/search?q={quote(k)}",                       # Twitter search
        "🎲": lambda k: f"https://huggingface.co/spaces/awacke1/WordGameAI?q={quote(k)}",  # this url plus query!
        "🃏": lambda k: f"https://huggingface.co/spaces/awacke1/WordGameAI?q={quote(PromptPrefix)}{quote(k)}",  # this url plus query!
        "📚": lambda k: f"https://huggingface.co/spaces/awacke1/WordGameAI?q={quote(PromptPrefix2)}{quote(k)}",  # this url plus query!
        "📝": lambda k: f"https://huggingface.co/spaces/awacke1/WordGameAI?q={quote(PromptPrefix3)}{quote(k)}",  # this url plus query!
    }

    for category, details in roleplaying_glossary.items():
        st.write(f"### {category}")
        cols = st.columns(len(details))  # Create dynamic columns based on the number of games
        for idx, (game, terms) in enumerate(details.items()):
            with cols[idx]:
                st.markdown(f"#### {game}")
                for term in terms:
                    links_md = ' '.join([f"[{emoji}]({url(term)})" for emoji, url in search_urls.items()])
                    st.markdown(f"{term} {links_md}", unsafe_allow_html=True)
77 |
+
|
78 |
+
def display_glossary_entity(k):
    """Render a single glossary keyword *k* followed by a row of emoji search links.

    FIX(review): the emoji keys had been mojibake-corrupted into identical
    strings (duplicate dict keys, so links were silently dropped). Restored
    distinct emoji labels, matching display_glossary_grid.
    """
    search_urls = {
        "📖": lambda k: f"https://en.wikipedia.org/wiki/{quote(k)}",                      # Wikipedia article
        "🔍": lambda k: f"https://www.google.com/search?q={quote(k)}",                    # Google search
        "▶️": lambda k: f"https://www.youtube.com/results?search_query={quote(k)}",       # YouTube search
        "🔎": lambda k: f"https://www.bing.com/search?q={quote(k)}",                      # Bing search
        "🐦": lambda k: f"https://twitter.com/search?q={quote(k)}",                       # Twitter search
        "🎲": lambda k: f"https://huggingface.co/spaces/awacke1/WordGameAI?q={quote(k)}",  # this url plus query!
        "🃏": lambda k: f"https://huggingface.co/spaces/awacke1/WordGameAI?q={quote(PromptPrefix)}{quote(k)}",  # this url plus query!
        "📚": lambda k: f"https://huggingface.co/spaces/awacke1/WordGameAI?q={quote(PromptPrefix2)}{quote(k)}",  # this url plus query!
        "📝": lambda k: f"https://huggingface.co/spaces/awacke1/WordGameAI?q={quote(PromptPrefix3)}{quote(k)}",  # this url plus query!
    }
    links_md = ' '.join([f"[{emoji}]({url(k)})" for emoji, url in search_urls.items()])
    st.markdown(f"{k} {links_md}", unsafe_allow_html=True)
|
92 |
+
|
93 |
+
|
94 |
+
|
95 |
|
96 |
st.markdown('''### ๐โจ๐ WordGameAI ''')
|
97 |
with st.expander("Help / About ๐", expanded=False):
|
|
|
191 |
}
|
192 |
|
193 |
|
194 |
+
|
195 |
+
# HTML5 based Speech Synthesis (Text to Speech in Browser)
# NOTE(review): @st.cache_resource memoizes on `result`, so rendering the same
# text twice in one session will skip the component the second time — confirm
# that is intended.
@st.cache_resource
def SpeechSynthesis(result):
    """Embed an HTML5 widget that reads *result* aloud in the browser.

    Builds a small page with a textarea pre-filled with *result* and a button
    wired to the Web Speech API (SpeechSynthesisUtterance), then renders it
    via streamlit.components.v1.html. Returns None.
    """
    documentHTML5='''
    <!DOCTYPE html>
    <html>
    <head>
        <title>Read It Aloud</title>
        <script type="text/javascript">
            function readAloud() {
                const text = document.getElementById("textArea").value;
                const speech = new SpeechSynthesisUtterance(text);
                window.speechSynthesis.speak(speech);
            }
        </script>
    </head>
    <body>
        <h1>🔊 Read It Aloud</h1>
        <textarea id="textArea" rows="10" cols="80">
    '''
    # Splice the text to be spoken between the opening and closing markup.
    documentHTML5 = documentHTML5 + result
    documentHTML5 = documentHTML5 + '''
        </textarea>
        <br>
        <button onclick="readAloud()">🔊 Read Aloud</button>
    </body>
    </html>
    '''
    components.html(documentHTML5, width=1280, height=300)
|
224 |
+
|
225 |
+
|
226 |
# 9. Sidebar with UI controls to review and re-run prompts and continue responses
|
227 |
@st.cache_resource
|
228 |
def get_table_download_link(file_path):
|
|
|
253 |
|
254 |
|
255 |
|
|
|
256 |
def FileSidebar():
|
257 |
# ----------------------------------------------------- File Sidebar for Jump Gates ------------------------------------------
|
258 |
# Compose a file sidebar of markdown md files:
|
|
|
296 |
if len(file_contents) > 0:
|
297 |
if next_action=='open':
|
298 |
file_content_area = st.text_area("File Contents:", file_contents, height=500)
|
299 |
+
try:
|
300 |
+
if st.button("๐", key="filecontentssearch"):
|
301 |
+
search_glossary(file_content_area)
|
302 |
+
except:
|
303 |
+
st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
|
304 |
+
|
305 |
if next_action=='md':
|
306 |
st.markdown(file_contents)
|
307 |
+
buttonlabel = '๐Run'
|
|
|
308 |
if st.button(key='RunWithLlamaandGPT', label = buttonlabel):
|
309 |
user_prompt = file_contents
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
310 |
try:
|
311 |
+
search_glossary(file_contents)
|
|
|
|
|
|
|
|
|
|
|
312 |
except:
|
313 |
st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
|
|
|
|
|
314 |
|
|
|
315 |
if next_action=='search':
|
316 |
file_content_area = st.text_area("File Contents:", file_contents, height=500)
|
|
|
|
|
317 |
user_prompt = file_contents
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
318 |
try:
|
319 |
+
search_glossary(file_contents)
|
|
|
|
|
|
|
|
|
|
|
320 |
except:
|
321 |
st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
|
|
|
|
|
322 |
# ----------------------------------------------------- File Sidebar for Jump Gates ------------------------------------------
|
323 |
|
324 |
|
325 |
FileSidebar()
|
326 |
|
327 |
|
|
|
|
|
328 |
|
329 |
# ---- Art Card Sidebar with Random Selection of image:
|
330 |
def get_image_as_base64(url):
|
|
|
341 |
|
342 |
# List of image URLs
# Art-card candidates for the sidebar; one is picked at random below and
# fetched via get_image_as_base64.
image_urls = [
    "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/gv1xmIiXh1NGTeeV-cYF2.png",
    "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/2YsnDyc_nDNW71PPKozdN.png",
    "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/G_GkRD_IT3f14K7gWlbwi.png",
]
|
348 |
|
349 |
# Select a random URL from the list
|
|
|
403 |
return score_data["score"]
|
404 |
return 0
|
405 |
|
406 |
+
@st.cache_resource
|
407 |
def search_glossary(query):
|
408 |
for category, terms in roleplaying_glossary.items():
|
409 |
if query.lower() in (term.lower() for term in terms):
|
|
|
412 |
|
413 |
all=""
|
414 |
|
415 |
+
query2 = PromptPrefix + query
|
416 |
+
response = chat_with_model(query2)
|
417 |
+
|
418 |
+
query3 = PromptPrefix2 + query + ' for story outline of method steps: ' + response # Add prompt preface for coding task behavior
|
|
|
|
|
|
|
|
|
|
|
|
|
419 |
response2 = chat_with_model(query3)
|
420 |
+
|
421 |
+
query4 = PromptPrefix3 + query + ' using this streamlit python programspecification to define features. Create entities for each variable and generate UI with HTML5 and JS that matches the streamlit program: ' + response2 # Add prompt preface for coding task behavior
|
422 |
+
response3 = chat_with_model(query4)
|
423 |
+
|
424 |
+
all = query + ' ' + response + ' ' + response2 + ' ' + response3
|
425 |
+
|
426 |
+
filename = generate_filename(all, "md")
|
427 |
+
create_file(filename, query, all, should_save)
|
428 |
|
|
|
|
|
|
|
429 |
SpeechSynthesis(all)
|
430 |
return all
|
431 |
+
|
432 |
|
433 |
# Function to display the glossary in a structured format
|
434 |
def display_glossary(glossary, area):
|
|
|
462 |
st.markdown(f"{term} {links_md}", unsafe_allow_html=True)
|
463 |
|
464 |
|
465 |
+
@st.cache_resource
def display_videos_and_links():
    """Show every .mp4 in the working directory in a two-column grid,
    each video followed by glossary jump links keyed on its file stem."""
    mp4_names = [name for name in os.listdir('.') if name.endswith('.mp4')]
    if not mp4_names:
        st.write("No MP4 videos found in the current directory.")
        return

    # Shortest file stems first, mirroring the image-gallery ordering.
    ordered = sorted(mp4_names, key=lambda name: len(name.split('.')[0]))

    cols = st.columns(2)  # two fixed columns shared by all videos
    for position, name in enumerate(ordered):
        with cols[position % 2]:  # alternate left / right column
            stem = name.split('.')[0]  # keyword is the file name without extension
            st.video(name, format='video/mp4', start_time=0)
            display_glossary_entity(stem)
|
486 |
+
|
487 |
+
@st.cache_resource
def display_images_and_wikipedia_summaries():
    """Show every .png in the working directory in a grid whose column count
    shrinks as file-name stems get longer (short names -> 4 cols, long -> 2),
    each image followed by glossary jump links keyed on its file stem.
    """
    image_files = [f for f in os.listdir('.') if f.endswith('.png')]
    if not image_files:
        st.write("No PNG images found in the current directory.")
        return
    # Sort by stem length so images sharing a grid size are grouped together.
    image_files_sorted = sorted(image_files, key=lambda x: len(x.split('.')[0]))
    grid_sizes = [len(f.split('.')[0]) for f in image_files_sorted]
    col_sizes = ['small' if size <= 4 else 'medium' if size <= 8 else 'large' for size in grid_sizes]
    num_columns_map = {"small": 4, "medium": 3, "large": 2}
    current_grid_size = 0
    for image_file, col_size in zip(image_files_sorted, col_sizes):
        # Start a new column set whenever the required grid width changes.
        if current_grid_size != num_columns_map[col_size]:
            cols = st.columns(num_columns_map[col_size])
            current_grid_size = num_columns_map[col_size]
            col_index = 0
        with cols[col_index % current_grid_size]:
            image = Image.open(image_file)
            st.image(image, caption=image_file, use_column_width=True)
            k = image_file.split('.')[0]  # Assumes keyword is the file name without extension
            display_glossary_entity(k)
        # FIX: advance the column cursor; previously it was never incremented,
        # so every image rendered in column 0 (compare display_videos_and_links).
        col_index += 1
|
508 |
+
|
509 |
+
def get_all_query_params(key):
    """Return every URL query-parameter value recorded under *key* (empty list if absent).

    FIX: st.query_params is a property-style object, not a callable —
    st.query_params() raised TypeError. Use .get_all(), which returns a list
    like the old experimental API did (and matches the bare st.query_params
    usage in main()).
    """
    return st.query_params.get_all(key)
|
511 |
+
|
512 |
+
def clear_query_params():
    """Remove all query parameters from the page URL.

    FIX: the previous body called st.query_params(), which raises TypeError
    (st.query_params is not callable) and cleared nothing.
    """
    st.query_params.clear()
|
514 |
+
|
515 |
+
# Function to display content or image based on a query
@st.cache_resource
def display_content_or_image(query):
    """Look *query* up in the transhuman glossary, falling back to images/<query>.png.

    Returns True when either a glossary term or an image was shown, else False
    (with a warning rendered).
    """
    needle = query.lower()
    # Only the first matching term is displayed.
    hit = next(
        ((category, term)
         for category, terms in transhuman_glossary.items()
         for term in terms
         if needle in term.lower()),
        None,
    )
    if hit is not None:
        category, term = hit
        st.subheader(f"Found in {category}:")
        st.write(term)
        return True

    candidate = f"images/{query}.png"  # Example directory where images are stored
    if os.path.exists(candidate):
        st.image(candidate, caption=f"Image for {query}")
        return True

    st.warning("No matching content or image found.")
    return False
|
531 |
|
532 |
+
|
533 |
game_emojis = {
|
534 |
"Dungeons and Dragons": "๐",
|
535 |
"Call of Cthulhu": "๐",
|
|
|
633 |
|
634 |
def clear_query_params():
    """Remove all query parameters from the page URL.

    NOTE(review): duplicate of the earlier clear_query_params definition —
    this later one wins at import time; consider deleting one copy.
    FIX: st.query_params is not callable; calling it raised TypeError.
    """
    st.query_params.clear()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
636 |
|
637 |
|
638 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
639 |
# My Inference API Copy
|
640 |
API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud' # Dr Llama
|
641 |
# Meta's Original - Chat HF Free Version:
|
|
|
649 |
"Content-Type": "application/json"
|
650 |
}
|
651 |
key = os.getenv('OPENAI_API_KEY')  # OpenAI key from the environment; may be None (re-checked in main via st.secrets)
prompt = "...."  # placeholder default prompt; overwritten before any real use
# Sidebar toggle: when enabled, transcripts/responses are persisted via create_file.
should_save = st.sidebar.checkbox("💾 Save", value=True, help="Save your session data.")
|
654 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
655 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
656 |
|
657 |
|
658 |
# 3. Stream Llama Response
|
|
|
809 |
else:
|
810 |
return ""
|
811 |
|
812 |
+
|
813 |
# 11. Chat with GPT - Caution on quota - now favoring fastest AI pipeline STT Whisper->LLM Llama->TTS
|
814 |
@st.cache_resource
|
815 |
+
def chat_with_model(prompt, document_section='', model_choice='gpt-3.5-turbo'): # gpt-4-0125-preview gpt-3.5-turbo
|
816 |
+
#def chat_with_model(prompt, document_section='', model_choice='gpt-4-0125-preview'): # gpt-4-0125-preview gpt-3.5-turbo
|
817 |
model = model_choice
|
818 |
conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
|
819 |
conversation.append({'role': 'user', 'content': prompt})
|
|
|
824 |
res_box = st.empty()
|
825 |
collected_chunks = []
|
826 |
collected_messages = []
|
827 |
+
|
828 |
+
for chunk in openai.ChatCompletion.create(model=model_choice, messages=conversation, temperature=0.5, stream=True):
|
|
|
829 |
collected_chunks.append(chunk)
|
830 |
chunk_message = chunk['choices'][0]['delta']
|
831 |
collected_messages.append(chunk_message)
|
|
|
842 |
st.write(time.time() - start_time)
|
843 |
return full_reply_content
|
844 |
|
|
|
845 |
@st.cache_resource
|
846 |
+
def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'): # gpt-4-0125-preview gpt-3.5-turbo
|
847 |
+
#def chat_with_file_contents(prompt, file_content, model_choice='gpt-4-0125-preview'): # gpt-4-0125-preview gpt-3.5-turbo
|
848 |
conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
|
849 |
conversation.append({'role': 'user', 'content': prompt})
|
850 |
if len(file_content)>0:
|
|
|
852 |
response = openai.ChatCompletion.create(model=model_choice, messages=conversation)
|
853 |
return response['choices'][0]['message']['content']
|
854 |
|
855 |
+
|
856 |
def extract_mime_type(file):
|
857 |
if isinstance(file, str):
|
858 |
pattern = r"type='(.*?)'"
|
|
|
955 |
href = f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'
|
956 |
return href
|
957 |
|
|
|
|
|
958 |
# Whisper speech-to-text inference configuration.
API_URL_IE = f'https://tonpixzfvq3791u9.us-east-1.aws.endpoints.huggingface.cloud'
# NOTE(review): the line above is immediately overwritten — the serverless
# Inference API below is the endpoint actually in effect. Delete one if that
# is not intended.
API_URL_IE = "https://api-inference.huggingface.co/models/openai/whisper-small.en"
MODEL2 = "openai/whisper-small.en"  # STT model id used for transcription
MODEL2_URL = "https://huggingface.co/openai/whisper-small.en"  # model card, for reference/links
HF_KEY = st.secrets['HF_KEY']  # Hugging Face token pulled from Streamlit secrets
headers = {
    "Authorization": f"Bearer {HF_KEY}",
    "Content-Type": "audio/wav"  # raw WAV bytes are POSTed to the endpoint (see query())
}
|
967 |
|
|
|
968 |
def query(filename):
|
969 |
with open(filename, "rb") as f:
|
970 |
data = f.read()
|
|
|
994 |
return output
|
995 |
|
996 |
def whisper_main():
|
|
|
|
|
|
|
|
|
997 |
filename = save_and_play_audio(audio_recorder)
|
998 |
if filename is not None:
|
999 |
transcription = transcribe_audio(filename)
|
|
|
1005 |
transcript=''
|
1006 |
st.write(transcript)
|
1007 |
|
1008 |
+
st.write('Reasoning with your inputs..')
|
|
|
|
|
1009 |
response = chat_with_model(transcript)
|
1010 |
st.write('Response:')
|
1011 |
st.write(response)
|
|
|
1012 |
filename = generate_filename(response, "txt")
|
1013 |
create_file(filename, transcript, response, should_save)
|
|
|
|
|
1014 |
|
1015 |
# Whisper to Llama:
|
1016 |
response = StreamLLMChatResponse(transcript)
|
1017 |
filename_txt = generate_filename(transcript, "md")
|
1018 |
create_file(filename_txt, transcript, response, should_save)
|
|
|
1019 |
filename_wav = filename_txt.replace('.txt', '.wav')
|
1020 |
import shutil
|
1021 |
try:
|
|
|
1023 |
shutil.copyfile(filename, filename_wav)
|
1024 |
except:
|
1025 |
st.write('.')
|
|
|
1026 |
if os.path.exists(filename):
|
1027 |
os.remove(filename)
|
1028 |
|
|
|
|
|
|
|
|
|
1029 |
|
1030 |
|
1031 |
# Sample function to demonstrate a response, replace with your own logic
|
|
|
1034 |
|
1035 |
|
1036 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1037 |
# 17. Main
|
1038 |
def main():
|
1039 |
+
prompt = PromptPrefix2
|
|
|
|
|
|
|
|
|
1040 |
with st.expander("Prompts ๐", expanded=False):
|
1041 |
+
example_input = st.text_input("Enter your prompt text:", value=prompt, help="Enter text to get a response.")
|
1042 |
+
if st.button("Run Prompt", help="Click to run."):
|
1043 |
try:
|
1044 |
response=StreamLLMChatResponse(example_input)
|
1045 |
create_file(filename, example_input, response, should_save)
|
1046 |
except:
|
1047 |
+
st.write('model is asleep. Starting now on A10 GPU. Please wait one minute then retry. KEDA triggered.')
|
|
|
1048 |
openai.api_key = os.getenv('OPENAI_API_KEY')
|
1049 |
if openai.api_key == None: openai.api_key = st.secrets['OPENAI_API_KEY']
|
|
|
1050 |
menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
|
1051 |
choice = st.sidebar.selectbox("Output File Type:", menu)
|
|
|
1052 |
model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
|
|
|
1053 |
user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
|
1054 |
collength, colupload = st.columns([2,3]) # adjust the ratio as needed
|
1055 |
with collength:
|
|
|
1080 |
filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
|
1081 |
create_file(filename, user_prompt, response, should_save)
|
1082 |
st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
|
|
|
|
|
1083 |
if st.button('๐ฌ Chat'):
|
1084 |
st.write('Reasoning with your inputs...')
|
1085 |
user_prompt_sections = divide_prompt(user_prompt, max_length)
|
|
|
1132 |
|
1133 |
|
1134 |
|
|
|
|
|
1135 |
GiveFeedback=False
|
1136 |
if GiveFeedback:
|
1137 |
with st.expander("Give your feedback ๐", expanded=False):
|
|
|
1138 |
feedback = st.radio("Step 8: Give your feedback", ("๐ Upvote", "๐ Downvote"))
|
1139 |
if feedback == "๐ Upvote":
|
1140 |
st.write("You upvoted ๐. Thank you for your feedback!")
|
1141 |
else:
|
1142 |
st.write("You downvoted ๐. Thank you for your feedback!")
|
|
|
1143 |
load_dotenv()
|
1144 |
st.write(css, unsafe_allow_html=True)
|
1145 |
st.header("Chat with documents :books:")
|
|
|
1160 |
filename = generate_filename(raw, 'txt')
|
1161 |
create_file(filename, raw, '', should_save)
|
1162 |
|
1163 |
+
|
|
|
|
|
|
|
1164 |
try:
|
1165 |
query_params = st.query_params
|
|
|
1166 |
query = (query_params.get('q') or query_params.get('query') or [''])
|
|
|
1167 |
if query: search_glossary(query)
|
1168 |
except:
|
1169 |
st.markdown(' ')
|
1170 |
|
1171 |
# Display the glossary grid
|
1172 |
st.markdown("### ๐ฒ๐บ๏ธ Word Game Gallery")
|
1173 |
+
|
1174 |
+
display_videos_and_links() # Video Jump Grid
|
1175 |
+
display_images_and_wikipedia_summaries() # Image Jump Grid
|
1176 |
+
display_glossary_grid(roleplaying_glossary) # Word Glossary Jump Grid
|
1177 |
+
display_buttons_with_scores() # Feedback Jump Grid
|
|
|
|
|
|
|
1178 |
|
|
|
1179 |
if 'action' in st.query_params:
|
1180 |
action = st.query_params()['action'][0] # Get the first (or only) 'action' parameter
|
1181 |
if action == 'show_message':
|