from omegaconf import OmegaConf
from query import VectaraQuery
import os
from PIL import Image
import uuid
import streamlit as st
from streamlit_pills import pills
from streamlit_feedback import streamlit_feedback
from utils import thumbs_feedback, send_amplitude_data, escape_dollars_outside_latex
max_examples = 6
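# Response languages offered in the UI, mapped to the 3-letter codes passed to VectaraQuery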
languages = {'English': 'eng', 'Spanish': 'spa', 'French': 'fra', 'Chinese': 'zho', 'German': 'deu', 'Hindi': 'hin', 'Arabic': 'ara',
             'Portuguese': 'por', 'Italian': 'ita', 'Japanese': 'jpn', 'Korean': 'kor', 'Russian': 'rus', 'Turkish': 'tur', 'Persian (Farsi)': 'fas',
             'Vietnamese': 'vie', 'Thai': 'tha', 'Hebrew': 'heb', 'Dutch': 'nld', 'Indonesian': 'ind', 'Polish': 'pol', 'Ukrainian': 'ukr',
             'Romanian': 'ron', 'Swedish': 'swe', 'Czech': 'ces', 'Greek': 'ell', 'Bengali': 'ben', 'Malay (or Malaysian)': 'msa', 'Urdu': 'urd'}
# Setup for HTTP API Calls to Amplitude Analytics
if 'device_id' not in st.session_state:
    st.session_state.device_id = str(uuid.uuid4())

if "feedback_key" not in st.session_state:
    st.session_state.feedback_key = 0

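# Interpret a config value that may arrive as a bool or as a string like "True"/"false"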
def isTrue(x) -> bool:
    if isinstance(x, bool):
        return x
    return x.strip().lower() == 'true'

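# Build and run the Streamlit chat interface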
def launch_bot():
    def reset():
        st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
        st.session_state.ex_prompt = None
        st.session_state.first_turn = True

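    # Run a single (non-streaming) query against the Vectara corpus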
    def generate_response(question):
        response = vq.submit_query(question, languages[st.session_state.language])
        return response

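    # Run a streaming query; the returned generator is consumed by st.write_stream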
    def generate_streaming_response(question):
        response = vq.submit_query_streaming(question, languages[st.session_state.language])
        return response

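    # Show the example-question pills on the first turn; return True if one was selected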
    def show_example_questions():
        if len(st.session_state.example_messages) > 0 and st.session_state.first_turn:
            selected_example = pills("Queries to Try:", st.session_state.example_messages, index=None)
            if selected_example:
                st.session_state.ex_prompt = selected_example
                st.session_state.first_turn = False
                return True
        return False

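    # On first run, load the app configuration from environment variables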
    if 'cfg' not in st.session_state:
        corpus_keys = str(os.environ['corpus_keys']).split(',')
        cfg = OmegaConf.create({
            'corpus_keys': corpus_keys,
            'api_key': str(os.environ['api_key']),
            'title': os.environ['title'],
            'source_data_desc': os.environ['source_data_desc'],
            'streaming': isTrue(os.environ.get('streaming', False)),
            'prompt_name': os.environ.get('prompt_name', None),
            'examples': os.environ.get('examples', None),
            'language': 'English'
        })
        st.session_state.cfg = cfg
        st.session_state.ex_prompt = None
        st.session_state.first_turn = True
        st.session_state.language = cfg.language
        # 'examples' may be unset; fall back to an empty list
        example_messages = [example.strip() for example in cfg.examples.split(",")] if cfg.examples else []
        st.session_state.example_messages = [em for em in example_messages if len(em) > 0][:max_examples]
        st.session_state.vq = VectaraQuery(cfg.api_key, cfg.corpus_keys, cfg.prompt_name)

    cfg = st.session_state.cfg
    vq = st.session_state.vq
    st.set_page_config(page_title=cfg.title, layout="wide")

    # left side content
    with st.sidebar:
        image = Image.open('Vectara-logo.png')
        st.image(image, width=175)
        st.markdown(f"## About\n\n"
                    f"This demo uses Retrieval Augmented Generation to ask questions about {cfg.source_data_desc}\n")

        cfg.language = st.selectbox('Language:', languages.keys())
        if st.session_state.language != cfg.language:
            st.session_state.language = cfg.language
            reset()
            st.rerun()

        st.markdown("\n")
        bc1, _ = st.columns([1, 1])
        with bc1:
            if st.button('Start Over'):
                reset()
                st.rerun()

        st.markdown("---")
        st.markdown(
            "## How this works?\n"
            "This app was built with [Vectara](https://vectara.com).\n"
            "Vectara's [Indexing API](https://docs.vectara.com/docs/api-reference/indexing-apis/indexing) was used to ingest the data into a Vectara corpus (or index).\n\n"
            "This app uses Vectara [Chat API](https://docs.vectara.com/docs/console-ui/vectara-chat-overview) to query the corpus and present the results to you, answering your question.\n\n"
        )
        st.markdown("---")

    st.markdown(f"<center> <h2> Vectara AI Assistant: {cfg.title} </h2> </center>", unsafe_allow_html=True)

if "messages" not in st.session_state.keys():
reset()
# Display chat messages
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.write(message["content"])
example_container = st.empty()
with example_container:
if show_example_questions():
example_container.empty()
st.rerun()
    # select prompt from example question or user provided input
    if st.session_state.ex_prompt:
        prompt = st.session_state.ex_prompt
    else:
        prompt = st.chat_input()
    if prompt:
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.write(prompt)
        st.session_state.ex_prompt = None

    # Generate a new response if last message is not from assistant
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant"):
            if cfg.streaming:
                stream = generate_streaming_response(prompt)
                response = st.write_stream(stream)
            else:
                with st.spinner("Thinking..."):
                    response = generate_response(prompt)
                    st.write(response)
            response = escape_dollars_outside_latex(response)
            message = {"role": "assistant", "content": response}
            st.session_state.messages.append(message)

            # Send query and response to Amplitude Analytics
            send_amplitude_data(
                user_query=st.session_state.messages[-2]["content"],
                chat_response=st.session_state.messages[-1]["content"],
                demo_name=cfg["title"],
                language=st.session_state.language
            )
        st.rerun()

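    # Collect thumbs-up/down feedback on the latest assistant response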
    if (st.session_state.messages[-1]["role"] == "assistant") and (st.session_state.messages[-1]["content"] != "How may I help you?"):
        streamlit_feedback(feedback_type="thumbs", on_submit=thumbs_feedback, key=st.session_state.feedback_key,
                           kwargs={"user_query": st.session_state.messages[-2]["content"],
                                   "chat_response": st.session_state.messages[-1]["content"],
                                   "demo_name": cfg["title"],
                                   "response_language": st.session_state.language})

if __name__ == "__main__":
    launch_bot()