import time
import logging
import random
import shutil
from json import JSONDecodeError
from urllib.parse import unquote

import streamlit as st
from markdown import markdown
from annotated_text import annotation
from haystack.document_stores import FAISSDocumentStore
from haystack.nodes import EmbeddingRetriever, FARMReader
from haystack.pipelines import ExtractiveQAPipeline

# FAISS index directory
INDEX_DIR = 'data/index'
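# The directory is expected to contain the artifacts produced when the index was built:
# faiss_document_store.db (SQLite store), my_faiss_index.faiss and my_faiss_index.json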
# the following function is cached so that the index and the models are loaded only once, at startup
@st.cache(hash_funcs={"builtins.SwigPyObject": lambda _: None}, allow_output_mutation=True)
def start_haystack():
    """
    Load document store, retriever and reader, and create the extractive QA pipeline.
    """
    # copy the SQLite DB backing the document store into the working directory
    shutil.copy(f'{INDEX_DIR}/faiss_document_store.db', '.')
    document_store = FAISSDocumentStore(
        faiss_index_path=f'{INDEX_DIR}/my_faiss_index.faiss',
        faiss_config_path=f'{INDEX_DIR}/my_faiss_index.json')
    print(f'Index size: {document_store.get_document_count()}')
    retriever = EmbeddingRetriever(
        document_store=document_store,
        embedding_model="sentence-transformers/multi-qa-mpnet-base-dot-v1",
        model_format="sentence_transformers"
    )
    reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2", use_gpu=True)
    pipe = ExtractiveQAPipeline(reader, retriever)
    return pipe
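
# Note: the FAISS index loaded above is assumed to have been built offline.
# A minimal, hypothetical sketch of that step (not executed by this app) could look like:
#
#   document_store = FAISSDocumentStore(embedding_dim=768, similarity="dot_product")
#   document_store.write_documents(docs)              # docs crawled from the Twin Peaks Wiki
#   document_store.update_embeddings(retriever)       # retriever as created in start_haystack()
#   document_store.save(index_path="data/index/my_faiss_index.faiss",
#                       config_path="data/index/my_faiss_index.json")
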
@st.cache()
def load_questions():
    """Load the example questions (one per line), used by the "Random question" button."""
    with open('./data/questions.txt') as fin:
        questions = [line.strip() for line in fin.readlines()]
    return questions

def set_state_if_absent(key, value):
    if key not in st.session_state:
        st.session_state[key] = value

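# Streamlit re-executes the whole script at every interaction, so the question, the answers
# and the results are persisted in st.session_state (initialized via set_state_if_absent in main()).
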
def query(pipe, question):
    """Run query and get answers"""
    return (pipe.run(question, params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 5}}), None)
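
# query() returns a tuple: the pipeline output and a raw_json placeholder (kept as None here).
# The pipeline output is a dict whose 'answers' list is rendered in main(); each answer,
# once converted with to_dict(), exposes 'answer', 'context', 'score' and 'meta' ('name', 'url').
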
def main():
    # st.set_page_config(page_title='Who killed Laura Palmer?',
    #                    page_icon="https://static.wikia.nocookie.net/twinpeaks/images/4/4a/Site-favicon.ico/revision/latest?cb=20210710003705")
    pipe = start_haystack()
    questions = load_questions()

    # Persistent state
    set_state_if_absent('question', "Where is Twin Peaks?")
    set_state_if_absent('answer', '')
    set_state_if_absent('results', None)
    set_state_if_absent('raw_json', None)
    set_state_if_absent('random_question_requested', False)

    # Small callback to reset the interface in case the text of the question changes
    def reset_results(*args):
        st.session_state.answer = None
        st.session_state.results = None
        st.session_state.raw_json = None

    # sidebar style
    st.markdown(
        """
<style>
[data-testid="stSidebar"][aria-expanded="true"] > div:first-child{
    width: 350px;
}
[data-testid="stSidebar"][aria-expanded="false"] > div:first-child{
    width: 350px;
    margin-left: -350px;
}
</style>
""",
        unsafe_allow_html=True,
    )
    # Title
    st.write("# Who killed Laura Palmer?")
    st.write("### The first Twin Peaks Question Answering system!")
    st.markdown("""
Ask any question about [Twin Peaks](https://twinpeaks.fandom.com/wiki/Twin_Peaks)
and see if the AI can find an answer...

*Note: do not use keywords, but full-fledged questions.*
""")
    # Sidebar
    st.sidebar.header("Who killed Laura Palmer?")
    st.sidebar.image("https://upload.wikimedia.org/wikipedia/it/3/39/Twin-peaks-1990.jpg")
    st.sidebar.markdown('<p align="center"><b>Twin Peaks Question Answering system</b></p>', unsafe_allow_html=True)
    st.sidebar.markdown(f"""
<style>
a {{
    text-decoration: none;
}}
.haystack-footer {{
    text-align: center;
}}
.haystack-footer h4 {{
    margin: 0.1rem;
    padding: 0;
}}
footer {{
    opacity: 0;
}}
.haystack-footer img {{
    display: block;
    margin-left: auto;
    margin-right: auto;
    width: 85%;
}}
</style>
<div class="haystack-footer">
<p><a href="https://github.com/anakin87/who-killed-laura-palmer">GitHub</a> -
Built with <a href="https://github.com/deepset-ai/haystack/">Haystack</a><br/>
<small>Data crawled from <a href="https://twinpeaks.fandom.com/wiki/Twin_Peaks_Wiki">Twin Peaks Wiki</a>.</small>
</p>
<img src='https://static.wikia.nocookie.net/twinpeaks/images/e/ef/Laura_Palmer%2C_the_Queen_Of_Hearts.jpg'/>
<br/>
</div>
""", unsafe_allow_html=True)

    # Spotify web player
    st.sidebar.markdown("""
<p align="center">
<iframe style="border-radius:12px" src="https://open.spotify.com/embed/playlist/38rrtWgflrw7grB37aMlsO?utm_source=generator" width="85%" height="380" frameBorder="0" allowfullscreen="" allow="autoplay; clipboard-write; encrypted-media; fullscreen; picture-in-picture"></iframe>
</p>""", unsafe_allow_html=True)
    # Search bar
    question = st.text_input("",
                             value=st.session_state.question,
                             max_chars=100,
                             # on_change=reset_results
                             )
    col1, col2 = st.columns(2)
    col1.markdown("<style>.stButton button {width:100%;}</style>", unsafe_allow_html=True)
    col2.markdown("<style>.stButton button {width:100%;}</style>", unsafe_allow_html=True)
    # Run button
    run_pressed = col1.button("Run")

    # Get the next random question from the list
    if col2.button("Random question"):
        reset_results()
        question = random.choice(questions)
        # Avoid picking the same question twice (the change would not be visible in the UI)
        while question == st.session_state.question:
            question = random.choice(questions)
        st.session_state.question = question
        st.session_state.random_question_requested = True
        # Re-run the script, setting the random question as the textbox value.
        # Unfortunately necessary, as the "Random question" button is _below_ the textbox.
        raise st.script_runner.RerunException(st.script_request_queue.RerunData(None))
    else:
        st.session_state.random_question_requested = False
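    # Run the query when the Run button is pressed or the question text has changed,
    # but not when the change was triggered by the "Random question" rerun above.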
    run_query = (run_pressed or question != st.session_state.question) and not st.session_state.random_question_requested

    # Get results for the query
    if run_query and question:
        time_start = time.time()
        reset_results()
        st.session_state.question = question
        with st.spinner("Performing neural search on documents..."):
            try:
                st.session_state.results, st.session_state.raw_json = query(pipe, question)
                time_end = time.time()
                print(f'elapsed time: {time_end - time_start}')
            except JSONDecodeError as je:
                st.error("An error occurred reading the results. Is the document store working?")
                return
            except Exception as e:
                logging.exception(e)
                if "The server is busy processing requests" in str(e) or "503" in str(e):
                    st.error("All our workers are busy! Try again later.")
                else:
                    st.error("An error occurred during the request.")
                return
    if st.session_state.results:
        st.write("## Results:")
        alert_irrelevance = True
        for count, result in enumerate(st.session_state.results['answers']):
            result = result.to_dict()
            if result["answer"]:
                if alert_irrelevance and result['score'] < 0.50:
                    alert_irrelevance = False
                    st.write("<h4 style='color: darkred'>Attention, the following answers have a low score:</h4>", unsafe_allow_html=True)
                answer, context = result["answer"], result["context"]
                start_idx = context.find(answer)
                end_idx = start_idx + len(answer)
                # Hack due to this bug: https://github.com/streamlit/streamlit/issues/3190
                st.write(markdown("- ..." + context[:start_idx] + str(annotation(answer, "ANSWER", "#3e1c21")) + context[end_idx:] + "..."), unsafe_allow_html=True)
                name = unquote(result['meta']['name'])
                url = result['meta']['url']
                source = f"[{name}]({url})"
                st.markdown(f"**Score:** {result['score']:.2f} - **Source:** {source}")
            else:
                st.info("Haystack is unsure whether any of the documents contain an answer to your question. Try to reformulate it!")

main()