Spaces:
Mr-Vicky-01 committed
Commit 8a9cc5d • 1 Parent(s): eee2885
Update app.py
app.py CHANGED
@@ -5,21 +5,22 @@ from dotenv import load_dotenv
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 from llama_index.core import Settings
 import os
-import base64
+from youtube_transcript_api import YouTubeTranscriptApi
 import time
+import shutil
 
 # Load environment variables
 load_dotenv()
 
-icons = {"assistant": "robot.png", "user": "man-kddi.png"}
+# icons = {"assistant": "robot.png", "user": "man-kddi.png"}
 
 # Configure the Llama index settings
 Settings.llm = HuggingFaceInferenceAPI(
     model_name="meta-llama/Meta-Llama-3-8B-Instruct",
     tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
-    context_window=
+    context_window=3000,
     token=os.getenv("HF_TOKEN"),
-
+    max_new_tokens=512,
     generate_kwargs={"temperature": 0.1},
 )
 Settings.embed_model = HuggingFaceEmbedding(
@@ -34,25 +35,46 @@ DATA_DIR = "data"
 os.makedirs(DATA_DIR, exist_ok=True)
 os.makedirs(PERSIST_DIR, exist_ok=True)
 
-def displayPDF(file):
-    with open(file, "rb") as f:
-        base64_pdf = base64.b64encode(f.read()).decode('utf-8')
-    pdf_display = f'<iframe src="data:application/pdf;base64,{base64_pdf}" width="100%" height="600" type="application/pdf"></iframe>'
-    st.markdown(pdf_display, unsafe_allow_html=True)
-
 def data_ingestion():
     documents = SimpleDirectoryReader(DATA_DIR).load_data()
+    print(documents)
     storage_context = StorageContext.from_defaults()
-    index = VectorStoreIndex.from_documents(documents)
+    index = VectorStoreIndex.from_documents(documents,show_progress=True)
     index.storage_context.persist(persist_dir=PERSIST_DIR)
 
+def extract_transcript_details(youtube_video_url):
+    try:
+        video_id=youtube_video_url.split("=")[1]
+
+        transcript_text=YouTubeTranscriptApi.get_transcript(video_id)
+
+        transcript = ""
+        for i in transcript_text:
+            transcript += " " + i["text"]
+
+        return transcript
+
+    except Exception as e:
+        st.error(e)
+
+def remove_old_files():
+    # Specify the directory path you want to clear
+    directory_path = "data"
+
+    # Remove all files and subdirectories in the specified directory
+    shutil.rmtree(directory_path)
+
+    # Recreate an empty directory if needed
+    os.makedirs(directory_path)
+
+
 def handle_query(query):
     storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
     index = load_index_from_storage(storage_context)
     chat_text_qa_msgs = [
         (
             "user",
-            """You are Q&A assistant named CHATTO, created by
+            """You are a Q&A assistant named CHATTO, created by Suriya. You have a specific response programmed for when users specifically ask about your creator, Suriya. The response is: "I was created by Suriya, an enthusiast in Artificial Intelligence. He is dedicated to solving complex problems and delivering innovative solutions. With a strong focus on machine learning, deep learning, Python, generative AI, NLP, and computer vision, Suriya is passionate about pushing the boundaries of AI to explore new possibilities." For all other inquiries, your main goal is to provide answers as accurately as possible, based on the instructions and context you have been given. If a question does not match the provided context or is outside the scope of the document, kindly advise the user to ask questions within the context of the document.
             Context:
             {context_str}
             Question:
@@ -61,57 +83,69 @@ def handle_query(query):
         )
     ]
     text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
+
     query_engine = index.as_query_engine(text_qa_template=text_qa_template)
     answer = query_engine.query(query)
-
-
+
+    final_ans = []
     if hasattr(answer, 'response'):
-
+        final_ans.append(answer.response)
     elif isinstance(answer, dict) and 'response' in answer:
-
+        final_ans.append(answer['response'])
     else:
-
+        final_ans.append("Sorry, I couldn't find an answer.")
 
-
-    for i in
-        yield i
+    ans = " ".join(final_ans)
+    for i in ans:
+        yield str(i)
         time.sleep(0.001)
 
 
 # Streamlit app initialization
 st.title("Chat with your PDF📄")
-st.markdown("
+st.markdown("Built by [Suriya❤️](https://github.com/theSuriya)")
+st.markdown("chat here👇")
 
 if 'messages' not in st.session_state:
-    st.session_state.messages = [{'role': 'assistant', "content": 'Hello! Upload a PDF and ask me anything about
-
+    st.session_state.messages = [{'role': 'assistant', "content": 'Hello! Upload a PDF and ask me anything about its content.'}]
+
+# Display or clear chat messages
 for message in st.session_state.messages:
-    with st.chat_message(message[
-        st.write(message[
-
+    with st.chat_message(message["role"]):
+        st.write(message["content"])
+
 with st.sidebar:
     st.title("Menu:")
     uploaded_file = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button")
+    video_url = st.text_input("Enter Youtube Video Link: ")
     if st.button("Submit & Process"):
         with st.spinner("Processing..."):
-
-
-
-
+            if len(os.listdir("data")) !=0:
+                remove_old_files()
+
+            if uploaded_file:
+                filepath = "data/saved_pdf.pdf"
+                with open(filepath, "wb") as f:
+                    f.write(uploaded_file.getbuffer())
+
+            if video_url:
+                extracted_text = extract_transcript_details(video_url)
+                with open("data/saved_text.txt", "w") as file:
+                    file.write(extracted_text)
+
             data_ingestion() # Process PDF every time new file is uploaded
             st.success("Done")
 
 user_prompt = st.chat_input("Ask me anything about the content of the PDF:")
-
-if user_prompt and uploaded_file:
+if user_prompt and (uploaded_file or video_url):
     st.session_state.messages.append({'role': 'user', "content": user_prompt})
-    with st.chat_message("user"
+    with st.chat_message("user"):
         st.write(user_prompt)
 
-
-
+    # Generate a new response if last message is not from assistant
+    if st.session_state.messages[-1]["role"] != "assistant":
+        with st.chat_message("assistant"):
             response = handle_query(user_prompt)
-
-
-            st.session_state.messages.append(
-
+            full_response = st.write_stream(response)
+            message = {"role": "assistant", "content": full_response}
+            st.session_state.messages.append(message)
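A note on the new extract_transcript_details(): the committed video_id=youtube_video_url.split("=")[1] works for links of the form https://www.youtube.com/watch?v=<id>, but it breaks on youtu.be short links and drags along extra query parameters such as &t=30s. A minimal defensive sketch (the extract_video_id helper below is hypothetical, not part of this commit):

from urllib.parse import urlparse, parse_qs

def extract_video_id(url):
    # Hypothetical helper (not in the commit): accepts both
    # youtube.com/watch?v=<id> and youtu.be/<id> style links.
    parsed = urlparse(url)
    if parsed.hostname == "youtu.be":
        return parsed.path.lstrip("/")
    return parse_qs(parsed.query).get("v", [None])[0]

# Both calls return "dQw4w9WgXcQ"; the committed split("=")[1] would
# return "dQw4w9WgXcQ&t" for the first one.
print(extract_video_id("https://www.youtube.com/watch?v=dQw4w9WgXcQ&t=30s"))
print(extract_video_id("https://youtu.be/dQw4w9WgXcQ"))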
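The rewritten handle_query() is now a generator: it collects the response into final_ans, then yields the joined string one character at a time with a short delay. On the UI side, st.write_stream(response) consumes that generator, renders it incrementally, and returns the full concatenated string, which is what gets appended back to st.session_state.messages. A minimal sketch of the same pattern outside Streamlit (stream_chars is illustrative, not from the commit):

import time

def stream_chars(text):
    # Same shape as handle_query()'s tail: yield one character at a
    # time, pausing briefly so a chat UI can show a typing effect.
    for ch in text:
        yield ch
        time.sleep(0.001)

# st.write_stream() would render this incrementally and return the
# concatenated string; joining the generator gives the same result.
print("".join(stream_chars("Hello from CHATTO")))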