Mr-Vicky-01 committed
Commit • 34ef943
1 Parent(s): 7f2419c
Update app.py
app.py CHANGED
@@ -6,6 +6,7 @@ from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 from llama_index.core import Settings
 import os
 import base64
+import time
 
 # Load environment variables
 load_dotenv()
@@ -71,6 +72,11 @@ def handle_query(query):
     else:
         return "Sorry, I couldn't find an answer."
 
+def streamer(text):
+    for i in text:
+        yield i
+        time.sleep(0.01)
+
 
 # Streamlit app initialization
 st.title("Chat with your PDF 🦙📄")
@@ -106,7 +112,7 @@ if user_prompt and uploaded_file:
     # Trigger assistant's response retrieval and update UI
     with st.spinner("Thinking..."):
        response = handle_query(user_prompt)
-
-
-
+    with st.chat_message("user", avatar="robot.png"):
+        st.write_stream(streamer(response))
+    st.session_state.messages.append({'role': 'assistant', "content": response})
 
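For context, the new streamer generator yields the response one character at a time with a short delay, and Streamlit's st.write_stream consumes that generator to render a typing effect inside a chat bubble. Below is a minimal standalone sketch of the same pattern, assuming Streamlit 1.31+ (which introduced st.write_stream); the response string is a stand-in, not a value taken from app.py.

import time
import streamlit as st

def streamer(text):
    # Yield one character at a time so st.write_stream renders a typing effect
    for ch in text:
        yield ch
        time.sleep(0.01)

# Stand-in response; in app.py this value comes from handle_query()
response = "Hello! Ask me anything about your PDF."

with st.chat_message("assistant"):
    st.write_stream(streamer(response))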