import os
import string
import time

import chromadb
import numpy as np
import openai
import pandas as pd
import streamlit as st
from datasets import load_dataset

from utils.helper_functions import *
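# The star import above is assumed to provide the helpers used below:
# llama2_7b_ysa, openai_text_embedding, quantized_influence, and call_chatgpt.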

openai.api_key = os.environ["OPENAI_API_KEY"]


# Front-end Design
st.set_page_config(layout="wide")
st.title("YSA|Larkin Chatbot")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

st.sidebar.markdown(
    """
    ### Instructions:

    This app guides you through the [YSA](https://youthspiritartworks.org/)/[Larkin](https://larkinstreetyouth.org/) websites, utilizing a RAG-ready Q&A dataset [here](https://huggingface.co/datasets/eagle0504/youthless-homeless-shelter-web-scrape-dataset-qa-formatted) for chatbot assistance. The Larkin domain is processed into QA data [here](https://huggingface.co/datasets/eagle0504/larkin-web-scrape-dataset-qa-formatted). 🤖 Enter a question, and the app finds similar ones in the database, offering answers with a distance score to gauge relevance (the lower the score, the closer the match). 🎯 For better accuracy and to reduce errors, user feedback helps refine the database. ✨
    """
)
st.sidebar.success("Select a shelter first!")
option = st.sidebar.selectbox("Which website do you want to ask?", ("YSA", "Larkin"))
st.sidebar.warning(
    "Running the AI Judge takes a bit longer, so this option defaults to 'No'."
)
run_ai_judge = st.sidebar.selectbox(
    "Shall we run AI Judge to provide additional scores?", ("No", "Yes")
)
special_threshold = st.sidebar.number_input(
    "Enter a distance-score threshold for filtering results (default 0.2):",
    value=0.2,
    placeholder="Type a number...",
)
user_timer = st.sidebar.selectbox("Shall we time each step?", ("No", "Yes"))
st.sidebar.success(
    "The 'distances' score indicates the proximity of your question to our database questions (lower is better). The 'ai_judge' score independently ranks the similarity between the user's question and the database answers (higher is better)."
)
clear_button = st.sidebar.button("Clear Conversation", key="clear")
if clear_button:
    st.session_state.messages = []


# Load the selected shelter's Q&A dataset from the Hugging Face Hub.
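# Both datasets are assumed to expose parallel "questions" and "answers" lists under their "train" split.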
if option == "YSA":
    begin_t = time.time()
    dataset = load_dataset(
        "eagle0504/ysa-web-scrape-dataset-qa-formatted-small-version"
    )
    end_t = time.time()
    if user_timer == "Yes":
        st.success(f"{option} Database loaded. | Time: {end_t - begin_t} sec")
    initial_input = "Tell me about YSA"
else:
    begin_t = time.time()
    dataset = load_dataset("eagle0504/larkin-web-scrape-dataset-qa-formatted")
    end_t = time.time()
    if user_timer == "Yes":
        st.success(f"{option} Database loaded. | Time: {end_t - begin_t} sec")
    initial_input = "Tell me about Larkin"


# Initialize a new in-memory ChromaDB client.
client = chromadb.Client()

# Generate a random number between 1 billion and 10 billion.
random_number: int = np.random.randint(low=int(1e9), high=int(1e10), dtype=np.int64)

# Generate a random string consisting of 10 uppercase letters and digits.
random_string: str = "".join(
    np.random.choice(list(string.ascii_uppercase + string.digits), size=10)
)

# Combine the random number and random string into one identifier.
combined_string: str = f"{random_number}{random_string}"

# Create a new collection in ChromaDB with the combined string as its name.
collection = client.create_collection(combined_string)
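# A randomly named collection per session avoids name collisions across Streamlit reruns.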


# Embed and store every question from the dataset in the collection.
with st.spinner("Loading, please be patient with us ... 🙏"):
    L = len(dataset["train"]["questions"])
    begin_t = time.time()
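    # No embedding function is passed, so Chroma falls back to its built-in default
    # embedder (a local MiniLM sentence-transformer at the time of writing).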
    collection.add(
        ids=[str(i) for i in range(0, L)],  # IDs are just strings
        documents=dataset["train"]["questions"],  # Enter questions here
        metadatas=[{"type": "support"} for _ in range(0, L)],
    )
    end_t = time.time()
    if user_timer == "Yes":
        st.success(f"Add to VectorDB. | Time: {end_t - begin_t} sec")


# React to user input
if prompt := st.chat_input(initial_input):
    with st.spinner("Loading, please be patient with us ... 🙏"):
        # Display user message in chat message container
        st.chat_message("user").markdown(prompt)
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})

        question = prompt
        begin_t = time.time()
        results = collection.query(query_texts=question, n_results=5)
        end_t = time.time()
        if user_timer == "Yes":
            st.success(f"Query answser. | Time: {end_t - begin_t} sec")
        idx = results["ids"][0]
        idx = [int(i) for i in idx]
        ref = pd.DataFrame(
            {
                "idx": idx,
                "questions": [dataset["train"]["questions"][i] for i in idx],
                "answers": [dataset["train"]["answers"][i] for i in idx],
                "distances": results["distances"][0],
            }
        )
        # Keep only close matches; fall back to all retrieved rows when none clear the threshold.
        filtered_ref = ref[ref["distances"] < special_threshold].copy()
        if filtered_ref.shape[0] > 0:
            if user_timer == "Yes":
                st.success("There are highly relevant information in our database.")
            ref_from_db_search = filtered_ref["answers"].str.cat(sep=" ")
            final_ref = filtered_ref
        else:
            st.warning(
                "The database may not contain information relevant to your question, so please be aware of possible hallucinations."
            )
            ref_from_db_search = ref["answers"].str.cat(sep=" ")
            final_ref = ref

        if option == "YSA":
            try:
                begin_t = time.time()
                llm_response = llama2_7b_ysa(question)
                end_t = time.time()
                if user_timer == "Yes":
                    st.success(f"Running LLM. | Time: {end_t - begin_t} sec")
                did_this_llm_run = "yes"
            except Exception:
                st.warning("Sorry, the inference endpoint is temporarily down. 😔")
                llm_response = "NA."
                did_this_llm_run = "no"
        else:
            st.warning(
                "Apologies! We are in the process of fine-tuning the model, so it's currently unavailable. ⚙️"
            )
            llm_response = "NA"
            did_this_llm_run = "no"

        # Append the fine-tuned LLM's answer as an extra reference row (distance 0 is a placeholder).
        finetuned_llm_guess = ["from_llm", question, llm_response, 0]
        if did_this_llm_run == "no":
            st.warning("Fine-tuned LLM not used in this call.")
        else:
            final_ref.loc[-1] = finetuned_llm_guess
        final_ref = final_ref.reset_index(drop=True)

        # Add the AI Judge as an additional, independent rating.
        if run_ai_judge == "Yes":
            independent_ai_judge_score = []
            begin_t = time.time()
            # Embed the question once; it does not change across iterations.
            arr1 = openai_text_embedding(question)
            for i in range(final_ref.shape[0]):
                this_content = final_ref["answers"][i]
                if len(this_content) > 3:  # Skip placeholder answers such as "NA".
                    arr2 = openai_text_embedding(this_content)
                    this_score = quantized_influence(arr1, arr2, k=3)[0]
                else:
                    this_score = 0
                independent_ai_judge_score.append(this_score)

            final_ref["ai_judge"] = independent_ai_judge_score

            end_t = time.time()
            if user_timer == "Yes":
                st.success(f"Using AI Judge. | Time: {end_t - begin_t} sec")

        # Simple RAG prompt: ground the model's answer in the retrieved context.
        engineered_prompt = (
            f"Based on the context: {ref_from_db_search}\n\n"
            f"Answer the user question: {question}\n\n"
            "Answer the question directly (don't say \"based on the context, ...\")"
        )

        begin_t = time.time()
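        # call_chatgpt (from utils.helper_functions) is assumed to wrap OpenAI's chat-completion API and return the reply text.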
        answer = call_chatgpt(engineered_prompt)
        end_t = time.time()
        if user_timer == "Yes":
            st.success(f"Final API Call. | Time: {end_t - begin_t} sec")
        response = answer

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        with st.spinner("Wait for it..."):
            st.markdown(response)
            with st.expander("See reference:"):
                st.table(final_ref)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})