Mr-TD commited on
Commit
9dc99bc
·
1 Parent(s): 60be2a0
app.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
import ollama

from utils.QA_Bot import QA_Bot
from utils.PDF_Reader import PDF_4_QA


@st.cache_resource
def _pull_model(name: str = "llama3.1") -> str:
    """Pull the Ollama model once per server process and return its name.

    Streamlit re-executes this whole script on every user interaction, so a
    bare module-level ``ollama.pull()`` would re-contact the Ollama daemon on
    each rerun; ``st.cache_resource`` makes it a one-time cost.
    """
    ollama.pull(name)
    return name


_pull_model()


# Streamlit app
def main():
    """Render the sidebar PDF upload UI and hand the indexed PDF to the Q&A bot."""
    st.sidebar.title("Upload PDF")
    st.sidebar.write("Download Demo PDF file from Below....")
    try:
        with open("Demo_Document/Kia_EV6.pdf", "rb") as file:
            st.sidebar.download_button(
                label="Download PDF",
                data=file,
                file_name="Kia_EV6.pdf",
            )
    except FileNotFoundError:
        # Don't crash the whole app if the demo asset is missing from the deploy.
        st.sidebar.warning("Demo PDF (Demo_Document/Kia_EV6.pdf) not found.")

    uploaded_file = st.sidebar.file_uploader("Choose a PDF file", type="pdf")
    if uploaded_file is not None:
        st.sidebar.success("File uploaded successfully.")
        vector_store = PDF_4_QA(uploaded_file)  # build FAISS index over the PDF text
        QA_Bot(vector_store)                    # chat loop over that index


if __name__ == '__main__':
    main()
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ langchain==0.3.17
2
+ langchain_ollama==0.2.3
3
+ ollama==0.4.7
4
+ PyPDF2==3.0.1
5
+ streamlit==1.41.1
utils/PDF_Reader.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import PyPDF2
2
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
3
+ from langchain.embeddings import HuggingFaceBgeEmbeddings
4
+ from langchain.vectorstores import FAISS
5
+
6
def read_pdf(uploaded_file):
    """Extract and concatenate the text of every page in a PDF.

    Parameters
    ----------
    uploaded_file : file-like object (or path) accepted by ``PyPDF2.PdfReader``.

    Returns
    -------
    str
        Text of all pages joined together; ``""`` for an empty or
        image-only document.
    """
    pdf_reader = PyPDF2.PdfReader(uploaded_file)
    # extract_text() may return None for image-only/scanned pages — the
    # original `text += page.extract_text()` raised TypeError in that case.
    # str.join also avoids quadratic += concatenation on large documents.
    return "".join(page.extract_text() or "" for page in pdf_reader.pages)
12
+
13
def Chunks(docs):
    """Split *docs* into overlapping text chunks suitable for embedding.

    Parameters
    ----------
    docs : str
        Full document text to split.

    Returns
    -------
    list[str]
        Chunks of at most 1000 characters with 100 characters of overlap.
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,    # max characters per chunk
        chunk_overlap=100,  # characters shared between consecutive chunks
    )
    return splitter.split_text(docs)
21
+
22
+
23
def PDF_4_QA(file):
    """Read a PDF and return a FAISS vector store over its chunked text.

    Parameters
    ----------
    file : file-like object accepted by :func:`read_pdf`.

    Returns
    -------
    FAISS vector store built from the document's text chunks.
    """
    raw_text = read_pdf(file)
    pdf_chunks = Chunks(docs=raw_text)

    # CPU-only sentence-transformer embeddings (no GPU assumed at deploy time).
    embedder = HuggingFaceBgeEmbeddings(
        model_name='sentence-transformers/all-MiniLM-L6-v2',
        model_kwargs={'device': 'cpu'},
    )
    return FAISS.from_texts(pdf_chunks, embedder)
utils/QA_Bot.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from utils.QnA import Q_A
3
+ import re,time
4
+
5
+
6
def QA_Bot(vectorstore):
    """Render the chat UI: replay history, take a question, stream the answer.

    Parameters
    ----------
    vectorstore : retriever-capable vector store passed through to ``Q_A``.

    Side effects: reads/writes ``st.session_state.messages`` so the
    conversation survives Streamlit reruns.
    """
    st.title("Q&A Bot")
    # Initialize chat history once per session
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Display chat messages from history on app rerun
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # React to user input
    if prompt := st.chat_input("What is up?"):
        # Display user message in chat message container
        st.chat_message("user").markdown(prompt)
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})

        ai_response = Q_A(vectorstore, prompt)
        response = f"Echo: {ai_response}"
        # Display assistant response in chat message container
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""
            # re.split with a capturing group KEEPS the whitespace separators,
            # so concatenating the chunks verbatim reconstructs the text
            # exactly.  (The original appended `chunk + " "`, injecting a
            # spurious space after every token — including the separators —
            # and that mangled text was what got saved to history.)
            for chunk in re.split(r'(\s+)', response):
                full_response += chunk
                time.sleep(0.01)
                # Add a blinking cursor to simulate typing
                message_placeholder.markdown(full_response + "▌")
            # Final render without the cursor artifact (the original left
            # the "▌" on screen permanently).
            message_placeholder.markdown(full_response)
        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": full_response})
utils/QnA.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.chains import RetrievalQA
2
+ from langchain_ollama import ChatOllama
3
+
4
def Q_A(vectorstore, question):
    """Answer *question* using a 'stuff' RetrievalQA chain over *vectorstore*.

    Parameters
    ----------
    vectorstore : vector store exposing ``as_retriever()``.
    question : str
        The user's natural-language question.

    Returns
    -------
    str
        The chain's answer text (the ``'result'`` field of the invocation).
    """
    llm = ChatOllama(
        model="llama3.1",
        temperature=0.5,
    )
    chain = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=vectorstore.as_retriever(),
    )
    answer = chain.invoke(question)
    return answer['result']
utils/__pycache__/Api_Key.cpython-311.pyc ADDED
Binary file (285 Bytes). View file
 
utils/__pycache__/PDF_Reader.cpython-311.pyc ADDED
Binary file (1.65 kB). View file
 
utils/__pycache__/QA_Bot.cpython-311.pyc ADDED
Binary file (2.28 kB). View file
 
utils/__pycache__/QnA.cpython-311.pyc ADDED
Binary file (848 Bytes). View file