ofermend commited on
Commit
907ed81
1 Parent(s): 06e14df

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +108 -43
app.py CHANGED
@@ -1,76 +1,141 @@
1
- import sys
2
- import toml
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  from omegaconf import OmegaConf
4
  from query import VectaraQuery
5
  import os
6
 
7
  import streamlit as st
8
  from PIL import Image
9
- from functools import partial
10
 
11
- def set_query(q: str):
12
- st.session_state['query'] = q
 
 
13
 
14
  def launch_bot():
15
- def get_answer(question):
16
  response = vq.submit_query(question)
17
  return response
 
 
 
 
18
 
19
- corpus_ids = list(eval(os.environ['corpus_ids']))
20
- questions = list(eval(os.environ['examples']))
21
- cfg = OmegaConf.create({
22
- 'customer_id': os.environ['customer_id'],
23
- 'corpus_ids': corpus_ids,
24
- 'api_key': os.environ['api_key'],
25
- 'title': os.environ['title'],
26
- 'description': os.environ['description'],
27
- 'examples': questions,
28
- 'source_data_desc': os.environ['source_data_desc']
29
- })
30
- vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_ids)
 
 
 
 
 
 
31
  st.set_page_config(page_title=cfg.title, layout="wide")
32
 
33
  # left side content
34
  with st.sidebar:
35
  image = Image.open('Vectara-logo.png')
36
  st.markdown(f"## Welcome to {cfg.title}\n\n"
37
- f"With this demo uses Retieval Augmented Generation to ask questions about {cfg.source_data_desc}\n\n")
38
 
39
  st.markdown("---")
40
  st.markdown(
41
  "## How this works?\n"
42
  "This app was built with [Vectara](https://vectara.com).\n"
43
  "Vectara's [Indexing API](https://docs.vectara.com/docs/api-reference/indexing-apis/indexing) was used to ingest the data into a Vectara corpus (or index).\n\n"
44
- "This app uses Vectara API to query the corpus and present the results to you, answering your question.\n\n"
45
  )
46
  st.markdown("---")
47
  st.image(image, width=250)
48
 
49
- st.markdown(f"<center> <h2> Vectara demo app: {cfg.title} </h2> </center>", unsafe_allow_html=True)
50
  st.markdown(f"<center> <h4> {cfg.description} <h4> </center>", unsafe_allow_html=True)
51
 
52
- # Setup a split column layout
53
- main_col, questions_col = st.columns([4, 2], gap="medium")
54
- with main_col:
55
- cols = st.columns([1, 8], gap="small")
56
- cols[0].markdown("""<h5>Search</h5>""", unsafe_allow_html=True)
57
- cols[1].text_input(label="search", key='query', max_chars=256, label_visibility='collapsed', help="Enter your question here")
58
-
59
- st.markdown("<h5>Response</h5>", unsafe_allow_html=True)
60
- response_text = st.empty()
61
- response_text.text_area(f" ", placeholder="The answer will appear here.", disabled=True,
62
- key="response", height=1, label_visibility='collapsed')
63
- with questions_col:
64
- st.markdown("<h5 style='text-align:center; color: red'> Sample questions </h5>", unsafe_allow_html=True)
65
- for q in list(cfg.examples):
66
- st.button(q, on_click=partial(set_query, q), use_container_width=True)
67
-
68
-
69
- # run the main flow
70
- if st.session_state.get('query'):
71
- query = st.session_state['query']
72
- response = get_answer(query)
73
- response_text.markdown(response)
 
 
 
 
 
 
 
 
 
 
74
 
75
  if __name__ == "__main__":
76
  launch_bot()
 
 
1
+ Hugging Face's logo
2
+ Hugging Face
3
+ Search models, datasets, users...
4
+ Models
5
+ Datasets
6
+ Spaces
7
+ Posts
8
+ Docs
9
+ Solutions
10
+ Pricing
11
+
12
+
13
+
14
+ Spaces:
15
+
16
+ vectara
17
+ /
18
+ IRS-chat
19
+
20
+
21
+ like
22
+ 3
23
+
24
+ Logs
25
+ App
26
+ Files
27
+ Community
28
+ Settings
29
+ IRS-chat
30
+ /
31
+ app.py
32
+
33
+ ofermend's picture
34
+ ofermend
35
+ Update app.py
36
+ 81cb44a
37
+ VERIFIED
38
+ 1 day ago
39
+ raw
40
+ history
41
+ blame
42
+ edit
43
+ delete
44
+ No virus
45
+ 3.56 kB
46
  from omegaconf import OmegaConf
47
  from query import VectaraQuery
48
  import os
49
 
50
  import streamlit as st
51
  from PIL import Image
 
52
 
53
def isTrue(x) -> bool:
    """Interpret *x* as a boolean flag.

    Booleans pass through unchanged; anything else is stringified and
    compared (case- and whitespace-insensitively) against 'true'.
    Used to parse the optional `streaming` environment variable, whose
    default is the bool False.
    """
    if isinstance(x, bool):
        return x  # bug fix: original returned the undefined name `s`
    # str(x) makes the comparison safe even for non-string inputs.
    return str(x).strip().lower() == 'true'
 
58
def launch_bot():
    """Render the Vectara RAG chat demo as a Streamlit app.

    Reads configuration from environment variables (customer_id, corpus_ids,
    api_key, title, description, source_data_desc, streaming, questions,
    prompt_name), caches the config and the VectaraQuery client in
    st.session_state, and drives a chat UI with optional streaming answers.
    """

    # --- nested helpers -------------------------------------------------
    # All helpers are defined up front: the original code called
    # submit_question() (via a button on_click and after st.chat_input)
    # BEFORE its `def` statement had executed on the Streamlit rerun,
    # which raises NameError.

    def generate_response(question):
        # Single-shot (non-streaming) answer from Vectara.
        response = vq.submit_query(question)
        return response

    def generate_streaming_response(question):
        # Streaming response (iterable of chunks) from Vectara.
        response = vq.submit_query_streaming(question)
        return response

    def submit_question(question):
        # Record and echo the user's question, then answer it.
        st.session_state.messages.append({"role": "user", "content": question})
        with st.chat_message("user"):
            st.write(question)
        generate_and_display_response(question)

    def generate_and_display_response(question):
        # Produce the assistant's answer (streaming or blocking) and
        # append it to the chat history.
        if cfg.streaming:
            stream = generate_streaming_response(question)
            response = st.write_stream(stream)
        else:
            with st.spinner("Thinking..."):
                response = generate_response(question)
            st.write(response)
        message = {"role": "assistant", "content": response}
        st.session_state.messages.append(message)

    # --- one-time per-session setup -------------------------------------
    if 'cfg' not in st.session_state:
        corpus_ids = str(os.environ['corpus_ids']).split(',')
        cfg = OmegaConf.create({
            'customer_id': str(os.environ['customer_id']),
            'corpus_ids': corpus_ids,
            'api_key': str(os.environ['api_key']),
            'title': os.environ['title'],
            'description': os.environ['description'],
            'source_data_desc': os.environ['source_data_desc'],
            'streaming': isTrue(os.environ.get('streaming', False)),
            'questions': os.environ['questions'],
            'prompt_name': os.environ.get('prompt_name', None)
        })
        st.session_state.cfg = cfg
        st.session_state.vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_ids, cfg.prompt_name)

    cfg = st.session_state.cfg
    vq = st.session_state.vq
    st.set_page_config(page_title=cfg.title, layout="wide")

    # left side content
    with st.sidebar:
        image = Image.open('Vectara-logo.png')
        st.markdown(f"## Welcome to {cfg.title}\n\n"
                    f"This demo uses Retrieval Augmented Generation to ask questions about {cfg.source_data_desc}\n\n")

        st.markdown("---")
        st.markdown(
            "## How this works?\n"
            "This app was built with [Vectara](https://vectara.com).\n"
            "Vectara's [Indexing API](https://docs.vectara.com/docs/api-reference/indexing-apis/indexing) was used to ingest the data into a Vectara corpus (or index).\n\n"
            "This app uses Vectara [Chat API](https://docs.vectara.com/docs/console-ui/vectara-chat-overview) to query the corpus and present the results to you, answering your question.\n\n"
        )
        st.markdown("---")
        st.image(image, width=250)

    st.markdown(f"<center> <h2> Vectara chat demo: {cfg.title} </h2> </center>", unsafe_allow_html=True)
    st.markdown(f"<center> <h4> {cfg.description} <h4> </center>", unsafe_allow_html=True)

    # Seed the chat history with a greeting on first run.
    if "messages" not in st.session_state.keys():
        st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]

    # Sample-question shortcut buttons.
    # Bug fix: `questions` was an undefined name. cfg.questions holds the
    # raw env string; split on ',' like corpus_ids above — TODO confirm
    # delimiter against how the Space's `questions` variable is set.
    questions = [q.strip() for q in str(cfg.questions).split(',') if q.strip()]
    for question in questions:
        # `q=question` default binds per-iteration, avoiding the
        # late-binding closure pitfall.
        st.button(question, on_click=lambda q=question: submit_question(q))

    # Display chat messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.write(message["content"])

    # User-provided prompt
    if prompt := st.chat_input():
        submit_question(prompt)
139
# Script entry point: launch the Streamlit chat app when run directly.
if __name__ == "__main__":
    launch_bot()