import os
import chainlit as cl
from dotenv import load_dotenv
from operator import itemgetter
from langchain_huggingface import HuggingFaceEndpoint
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEndpointEmbeddings
from langchain_core.prompts import PromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.runnable.config import RunnableConfig
# GLOBAL SCOPE - ENTIRE APPLICATION HAS ACCESS TO VALUES SET IN THIS SCOPE #
# ---- ENV VARIABLES ---- #
"""
The load_dotenv() call below loads our environment file (.env) if it is present.
NOTE: Make sure that .env is in your .gitignore file - it is by default, but please ensure it remains there.
"""
load_dotenv()
"""
We will load our environment variables here.
"""
HF_LLM_ENDPOINT = os.environ["HF_LLM_ENDPOINT"]
HF_EMBED_ENDPOINT = os.environ["HF_EMBED_ENDPOINT"]
HF_TOKEN = os.environ["HF_TOKEN"]
# ---- GLOBAL DECLARATIONS ---- #
# Paths added so the app works inside Docker as well as when run directly via `chainlit run`
DATA_DIR = "./data"
VECTORSTORE_DIR = os.path.join(DATA_DIR, "vectorstore")
VECTORSTORE_PATH = os.path.join(VECTORSTORE_DIR, "index.faiss")
# -- RETRIEVAL -- #
"""
1. Load Documents from PDF File
2. Split Documents into Chunks
3. Load HuggingFace Embeddings (remember to use the URL we set above)
4. Index Files if they do not exist, otherwise load the vectorstore
"""
### 1. CREATE PDF LOADER AND LOAD DOCUMENTS
### NOTE: PAY ATTENTION TO THE PATH THEY ARE IN.
# wget --no-check-certificate 'https://drive.google.com/uc?id=1tGmnWoO-wtU_bTs_M1GVXrTB5Su61zLg' -O data/finantial_report.pdf
# loader = PyPDFLoader("/home/sahane/AIE3/Week 4/Day 1/Airbnb-10K/data/finantial_report.pdf")
# Path changed to a relative one for Docker
loader = PyPDFLoader("./data/finantial_report.pdf")
pages = loader.load_and_split()
# The first two pages (cover and table of contents) are skipped, and the
# 'Table of Contents\n' header is stripped from every remaining page.
text_content = []
for i in range(2, len(pages)):
    text_content.append((pages[i].page_content.replace('Table of Contents\n', ''), {'page source': i}))
# Some regular expressions can help separate structured (tabular/financial) text from unstructured prose
import re
# Regular expression patterns for identifying structured and unstructured sections
structured_pattern = re.compile(r"\(in millions(?:, except\b.*)?\)|\b(unaudited)\b|\bBalance Sheet\b|\bIncome Statement\b|\bCash Flows\b|\bfollowing table\b", re.IGNORECASE)
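# Quick sanity check of the pattern with illustrative strings (these examples are
# made up for demonstration and are not taken from the report itself):
assert structured_pattern.search("Condensed Balance Sheets (in millions) (unaudited)") is not None
assert structured_pattern.search("We operate a global travel marketplace.") is None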
# Split the text content
structured_data = []
unstructured_data = []
for text in text_content:
if structured_pattern.search(text[0]):
structured_data.append(text[0] + str(text[1]))
else:
unstructured_data.append(text[0] + str(text[1]))
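# Uncomment to inspect how the pages were split (counts depend on the PDF contents):
# print(f"{len(structured_data)} structured pages, {len(unstructured_data)} unstructured pages")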
#Alldoc = text_splitter.create_documents(structured_data + unstructured_data)
from langchain.schema import Document
documents = []
for idx, text in enumerate(structured_data + unstructured_data):
document = Document(id=idx, page_content=text)
documents.append(document)
### 2. CREATE TEXT SPLITTER AND SPLIT DOCUMENTS
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
# split_chunks = text_splitter.split_documents(Alldoc)
split_chunks = text_splitter.split_documents(documents)
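# Sanity check: the recursive splitter's default separators end with "", so it can
# always fall back to character-level splits and no chunk should exceed chunk_size.
assert all(len(chunk.page_content) <= 1000 for chunk in split_chunks)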
### 3. LOAD HUGGINGFACE EMBEDDINGS
hf_embeddings = HuggingFaceEndpointEmbeddings(
model=HF_EMBED_ENDPOINT,
task="feature-extraction",
    huggingfacehub_api_token=HF_TOKEN,
)
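# Example usage (commented out so that importing this module makes no network call):
# query_vector = hf_embeddings.embed_query("What was Airbnb's revenue?")
# `query_vector` is a list of floats whose length is the embedding model's dimension.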
## Prevent re-indexing if a vectorstore already exists on disk
if os.path.exists(VECTORSTORE_PATH):
vectorstore = FAISS.load_local(
        VECTORSTORE_DIR,  # i.e. "./data/vectorstore"
hf_embeddings,
allow_dangerous_deserialization=True # this is necessary to load the vectorstore from disk as it's stored as a `.pkl` file.
)
hf_retriever = vectorstore.as_retriever()
print("Loaded Vectorstore")
else:
print("Indexing Files")
os.makedirs(VECTORSTORE_DIR, exist_ok=True)
### 4. INDEX FILES
### NOTE: REMEMBER TO BATCH THE DOCUMENTS WITH MAXIMUM BATCH SIZE = 32
    for i in range(0, len(split_chunks), 32):
        batch = split_chunks[i : i + 32]
        if i == 0:
            # The first batch creates the FAISS index; subsequent batches are added to it.
            vectorstore = FAISS.from_documents(batch, hf_embeddings)
        else:
            vectorstore.add_documents(batch)
vectorstore.save_local(VECTORSTORE_DIR)
hf_retriever = vectorstore.as_retriever()
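# Example usage (commented out; each call embeds the query via the endpoint):
# docs = hf_retriever.invoke("total revenue for 2023")
# The FAISS retriever returns the 4 most similar chunks by default.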
# -- AUGMENTED -- #
"""
1. Define a String Template
2. Create a Prompt Template from the String Template
"""
### 1. DEFINE STRING TEMPLATE
RAG_PROMPT_TEMPLATE = """\
<|start_header_id|>system<|end_header_id|>
You are a helpful assistant. You answer user questions based on provided context. If you can't answer the question with the provided context,\
say you don't know.<|eot_id|>
<|start_header_id|>user<|end_header_id|>
User Query:
{query}
Context:
{context}<|eot_id|>
<|start_header_id|>assistant<|end_header_id|>
"""
# Note that the template contains no response: it ends with the assistant header and is
# NOT followed by <|eot_id|>, because the model has not generated a response yet.
### 2. CREATE PROMPT TEMPLATE
rag_prompt = PromptTemplate.from_template(RAG_PROMPT_TEMPLATE)
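# Example (illustrative values) of rendering the template before it is sent to the LLM:
# print(rag_prompt.format(query="What was total revenue?", context="Revenue grew to ..."))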
# -- GENERATION -- #
"""
1. Create a HuggingFaceEndpoint for the LLM
"""
### 1. CREATE HUGGINGFACE ENDPOINT FOR LLM
hf_llm = HuggingFaceEndpoint(
    endpoint_url=HF_LLM_ENDPOINT,
max_new_tokens=512,
top_k=10,
top_p=0.95,
typical_p=0.95,
temperature=0.01,
repetition_penalty=1.03,
    huggingfacehub_api_token=HF_TOKEN,
)
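# Example usage (commented out; this would call the inference endpoint directly):
# print(hf_llm.invoke("Briefly describe what a 10-K filing is."))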
@cl.author_rename
def rename(original_author: str):
"""
This function can be used to rename the 'author' of a message.
    In this case, we're overriding the 'Assistant' author to be 'Airbnb 10k Bot'.
"""
rename_dict = {
"Assistant" : "Airbnb 10k Bot"
}
return rename_dict.get(original_author, original_author)
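# e.g. rename("Assistant") returns "Airbnb 10k Bot"; any other author name is passed through unchanged.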
@cl.on_chat_start
async def start_chat():
"""
This function will be called at the start of every user session.
We will build our LCEL RAG chain here, and store it in the user session.
The user session is a dictionary that is unique to each user session, and is stored in the memory of the server.
"""
### BUILD LCEL RAG CHAIN THAT ONLY RETURNS TEXT
    lcel_rag_chain = (
        {"context": itemgetter("query") | hf_retriever, "query": itemgetter("query")}
        | rag_prompt
        | hf_llm
    )
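    # The dict above fans the single input out: the "query" value is routed both
    # through the retriever (to become the context) and passed along unchanged.
    # e.g. lcel_rag_chain.invoke({"query": "..."}) returns the raw LLM output string.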
cl.user_session.set("lcel_rag_chain", lcel_rag_chain)
@cl.on_message
async def main(message: cl.Message):
"""
    This function will be called every time a message is received from a session.
We will use the LCEL RAG chain to generate a response to the user query.
The LCEL RAG chain is stored in the user session, and is unique to each user session - this is why we can access it here.
"""
lcel_rag_chain = cl.user_session.get("lcel_rag_chain")
msg = cl.Message(content="")
async for chunk in lcel_rag_chain.astream(
{"query": message.content},
config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
):
await msg.stream_token(chunk)
await msg.send()
# docker build -t airbnb-llm-chainrag-chainlit-hfs .