Huzaifa367 committed on
Commit 5cb6b47 · verified · 1 Parent(s): d99127e

Upload 2 files

Files changed (2)
  1. app.py +104 -0
  2. requirements.txt +9 -0
app.py ADDED
@@ -0,0 +1,104 @@
+ import streamlit as st
+ from PyPDF2 import PdfReader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain_groq import ChatGroq
+ from langchain_community.embeddings import HuggingFaceInferenceAPIEmbeddings
+ from langchain.vectorstores import FAISS
+ from langchain.chains.question_answering import load_qa_chain
+ from langchain.prompts import PromptTemplate
+ import tempfile
+ from gtts import gTTS
+ import os
+
+ def text_to_speech(text):
+     tts = gTTS(text=text, lang='en')
+     audio_file = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
+     temp_filename = audio_file.name
+     tts.save(temp_filename)
+     st.audio(temp_filename, format='audio/mp3')
+     os.remove(temp_filename)
+
+ def get_pdf_text(pdf_docs):
+     text = ""
+     for pdf in pdf_docs:
+         pdf_reader = PdfReader(pdf)
+         for page in pdf_reader.pages:
+             text += page.extract_text()
+     return text
+
+ def get_text_chunks(text):
+     text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
+     chunks = text_splitter.split_text(text)
+     return chunks
+
+ def get_vector_store(text_chunks, api_key):
+     embeddings = HuggingFaceInferenceAPIEmbeddings(api_key=api_key, model_name="sentence-transformers/all-MiniLM-l6-v2")
+     vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
+     vector_store.save_local("faiss_index")
+
+ def get_conversational_chain():
+
+     prompt_template = """
+     Answer the question as detailed as possible from the provided context and make sure to provide all the details. If the answer is not in
+     the provided context, just say "answer is not available in the context"; don't provide a wrong answer.\n\n
+     Context:\n {context}?\n
+     Question: \n{question}\n
+
+     Answer:
+     """
+
+     model = ChatGroq(temperature=0, groq_api_key="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", model_name="llama3-8b-8192")
+
+     prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
+     chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
+
+     return chain
+
+ def user_input(user_question, api_key):
+     embeddings = HuggingFaceInferenceAPIEmbeddings(api_key=api_key, model_name="sentence-transformers/all-MiniLM-l6-v2")
+
+     new_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
+     docs = new_db.similarity_search(user_question)
+
+     chain = get_conversational_chain()
+
+     response = chain(
+         {"input_documents": docs, "question": user_question},
+         return_only_outputs=True)
+
+     print(response)  # Debugging line
+
+     st.write("Replies:")
+     if isinstance(response["output_text"], str):
+         response_list = [response["output_text"]]
+     else:
+         response_list = response["output_text"]
+
+     for text in response_list:
+         st.write(text)
+         # Convert text to speech for each response
+         text_to_speech(text)
+
+ def main():
+     st.set_page_config("Chat PDF")
+     st.header("Chat with PDF")
+
+     api_key = st.secrets["inference_api_key"]
+
+     user_question = st.text_input("Ask a Question from the PDF Files")
+
+     if user_question:
+         user_input(user_question, api_key)
+
+     with st.sidebar:
+         st.title("Menu:")
+         pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
+         if st.button("Submit & Process"):
+             with st.spinner("Processing..."):
+                 raw_text = get_pdf_text(pdf_docs)
+                 text_chunks = get_text_chunks(raw_text)
+                 get_vector_store(text_chunks, api_key)
+                 st.success("Done")
+
+ if __name__ == "__main__":
+     main()
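
Note for anyone reproducing this Space locally: main() reads the embedding token from st.secrets["inference_api_key"], so Streamlit needs a .streamlit/secrets.toml (or the equivalent Space secret) along these lines. This is a minimal sketch, assuming the value is a Hugging Face Inference API token; the token below is a placeholder, not a real credential.

  # .streamlit/secrets.toml  (keep out of version control)
  inference_api_key = "hf_xxxxxxxxxxxxxxxxxxxxxxxx"
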
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ requests
+ streamlit
+ faiss-cpu
+ PyPDF2
+ langchain
+ langchain-groq
+ langchain-community
+ python-dotenv
+ gtts
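
With the two files above in place, the standard Streamlit workflow should be enough to run the app locally (assuming a recent Python 3 environment and the secrets file sketched after app.py):

  pip install -r requirements.txt
  streamlit run app.py
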