Update app.py
Browse files
app.py
CHANGED
@@ -2,9 +2,8 @@ import os
|
|
2 |
import streamlit as st
|
3 |
from langchain_chroma import Chroma
|
4 |
from langchain_community.embeddings import HuggingFaceEmbeddings
|
5 |
-
from langchain.chains.question_answering import load_qa_chain
|
6 |
from langchain.memory import ConversationBufferMemory
|
7 |
-
from
|
8 |
from langchain_groq import ChatGroq
|
9 |
from dotenv import load_dotenv
|
10 |
from sentence_transformers import SentenceTransformer
|
@@ -23,8 +22,7 @@ if 'initialized' not in st.session_state:
|
|
23 |
try:
|
24 |
with st.spinner("Initializing..."):
|
25 |
# Initialize embeddings model
|
26 |
-
model_path = "sentence-transformers/all-MiniLM-L12-v2"
|
27 |
-
|
28 |
st.session_state.embedding_function = HuggingFaceEmbeddings(
|
29 |
model_name=model_path,
|
30 |
model_kwargs={'device': 'cpu'},
|
@@ -61,7 +59,7 @@ if 'initialized' not in st.session_state:
|
|
61 |
# Load QA chain
|
62 |
st.session_state.qa_chain = load_qa_chain(
|
63 |
llm=st.session_state.chat_model,
|
64 |
-
chain_type="
|
65 |
memory=st.session_state.memory,
|
66 |
prompt=prompt
|
67 |
)
|
@@ -75,12 +73,12 @@ if 'initialized' not in st.session_state:
|
|
75 |
|
76 |
# Clear chat history buttons
|
77 |
if st.button("Clear Chat History"):
|
78 |
-
if 'memory' in st.session_state:
|
79 |
st.session_state.memory.clear()
|
80 |
st.experimental_rerun() # Refresh the app to reflect the cleared history
|
81 |
|
82 |
# Display chat history if initialized
|
83 |
-
if st.session_state.initialized and 'memory' in st.session_state:
|
84 |
if st.session_state.memory.buffer_as_messages:
|
85 |
for message in st.session_state.memory.buffer_as_messages:
|
86 |
if message.type == "ai":
|
@@ -89,20 +87,21 @@ if st.session_state.initialized and 'memory' in st.session_state:
|
|
89 |
st.chat_message(name="human", avatar="👤").write(message.content)
|
90 |
|
91 |
# Input for new query
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
|
|
102 |
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
|
107 |
-
|
108 |
-
|
|
|
2 |
import streamlit as st
|
3 |
from langchain_chroma import Chroma
|
4 |
from langchain_community.embeddings import HuggingFaceEmbeddings
|
|
|
5 |
from langchain.memory import ConversationBufferMemory
|
6 |
+
from langchain.prompts import PromptTemplate
|
7 |
from langchain_groq import ChatGroq
|
8 |
from dotenv import load_dotenv
|
9 |
from sentence_transformers import SentenceTransformer
|
|
|
22 |
try:
|
23 |
with st.spinner("Initializing..."):
|
24 |
# Initialize embeddings model
|
25 |
+
model_path = "sentence-transformers/all-MiniLM-L12-v2"
|
|
|
26 |
st.session_state.embedding_function = HuggingFaceEmbeddings(
|
27 |
model_name=model_path,
|
28 |
model_kwargs={'device': 'cpu'},
|
|
|
59 |
# Load QA chain
|
60 |
st.session_state.qa_chain = load_qa_chain(
|
61 |
llm=st.session_state.chat_model,
|
62 |
+
chain_type="stuff",
|
63 |
memory=st.session_state.memory,
|
64 |
prompt=prompt
|
65 |
)
|
|
|
73 |
|
74 |
# Clear chat history buttons
|
75 |
if st.button("Clear Chat History"):
|
76 |
+
if 'memory' in st.session_state and st.session_state.memory:
|
77 |
st.session_state.memory.clear()
|
78 |
st.rerun() # Refresh the app to reflect the cleared history
|
79 |
|
80 |
# Display chat history if initialized
|
81 |
+
if st.session_state.initialized and 'memory' in st.session_state and st.session_state.memory:
|
82 |
if st.session_state.memory.buffer_as_messages:
|
83 |
for message in st.session_state.memory.buffer_as_messages:
|
84 |
if message.type == "ai":
|
|
|
87 |
st.chat_message(name="human", avatar="👤").write(message.content)
|
88 |
|
89 |
# Input for new query
|
90 |
+
if 'initialized' in st.session_state and st.session_state.initialized:
|
91 |
+
query = st.chat_input("Ask something")
|
92 |
+
if query:
|
93 |
+
try:
|
94 |
+
with st.spinner("Answering..."):
|
95 |
+
# Perform similarity search and get response
|
96 |
+
docs = st.session_state.docsearch.similarity_search(query, k=1)
|
97 |
+
response = st.session_state.qa_chain(
|
98 |
+
{"input_documents": docs, "human_input": query},
|
99 |
+
return_only_outputs=True
|
100 |
+
)["output_text"]
|
101 |
|
102 |
+
# Display new message
|
103 |
+
st.chat_message(name="human", avatar="👤").write(query)
|
104 |
+
st.chat_message(name="ai", avatar="🤖").write(response)
|
105 |
|
106 |
+
except Exception as e:
|
107 |
+
st.error(f"An error occurred: {e}")
|