Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -6,7 +6,7 @@ import os
|
|
6 |
# from langchain_experimental.llms import ChatLlamaAPI
|
7 |
from langchain.embeddings import HuggingFaceEmbeddings
|
8 |
from pinecone import Pinecone
|
9 |
-
|
10 |
from langchain.prompts import PromptTemplate
|
11 |
from langchain.chains import RetrievalQA
|
12 |
import streamlit.components.v1 as components
|
@@ -14,9 +14,12 @@ from langchain_groq import ChatGroq
|
|
14 |
from langchain.chains import ConversationalRetrievalChain
|
15 |
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
|
16 |
import time
|
|
|
|
|
|
|
17 |
|
18 |
HUGGINGFACEHUB_API_TOKEN = st.secrets['HUGGINGFACEHUB_API_TOKEN']
|
19 |
-
|
20 |
@dataclass
|
21 |
class Message:
|
22 |
"""Class for keeping track of a chat message."""
|
@@ -39,17 +42,22 @@ def initialize_session_state():
|
|
39 |
|
40 |
embeddings = download_hugging_face_embeddings()
|
41 |
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
index_name
|
|
|
|
|
|
|
|
|
46 |
|
47 |
docsearch = Pinecone.from_existing_index(index_name, embeddings)
|
48 |
|
49 |
prompt_template = """
|
50 |
You are a trained bot to guide people about their medical concerns. You will answer the user's query with your knowledge and the context provided.
|
|
|
51 |
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
|
52 |
-
|
53 |
Use the following pieces of context to answer the user's question.
|
54 |
Context: {context}
|
55 |
Question: {question}
|
@@ -70,7 +78,7 @@ def initialize_session_state():
|
|
70 |
retrieval_chain = ConversationalRetrievalChain.from_llm(llm=chat,
|
71 |
chain_type="stuff",
|
72 |
retriever=docsearch.as_retriever(
|
73 |
-
search_kwargs={'k':
|
74 |
return_source_documents=True,
|
75 |
combine_docs_chain_kwargs={"prompt": PROMPT},
|
76 |
memory= memory
|
@@ -96,7 +104,7 @@ def on_click_callback():
|
|
96 |
|
97 |
initialize_session_state()
|
98 |
|
99 |
-
st.title("
|
100 |
|
101 |
# st.markdown(
|
102 |
# """
|
|
|
6 |
# from langchain_experimental.llms import ChatLlamaAPI
|
7 |
from langchain.embeddings import HuggingFaceEmbeddings
|
8 |
from pinecone import Pinecone
|
9 |
+
|
10 |
from langchain.prompts import PromptTemplate
|
11 |
from langchain.chains import RetrievalQA
|
12 |
import streamlit.components.v1 as components
|
|
|
14 |
from langchain.chains import ConversationalRetrievalChain
|
15 |
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
|
16 |
import time
|
17 |
+
import os
|
18 |
+
os.environ['PINECONE_API_KEY'] = "fa944c7c-5775-4a96-8704-e04f7a86614e"
|
19 |
+
pc = Pinecone()
|
20 |
|
21 |
HUGGINGFACEHUB_API_TOKEN = st.secrets['HUGGINGFACEHUB_API_TOKEN']
|
22 |
+
from langchain.vectorstores import Pinecone
|
23 |
@dataclass
|
24 |
class Message:
|
25 |
"""Class for keeping track of a chat message."""
|
|
|
42 |
|
43 |
embeddings = download_hugging_face_embeddings()
|
44 |
|
45 |
+
|
46 |
+
index_name = "medical-advisor"
|
47 |
+
|
48 |
+
if index_name in pc.list_indexes().names():
|
49 |
+
print("index already exists" , index_name)
|
50 |
+
index= pc.Index(index_name)  # your existing index, already created and ready to use
|
51 |
+
print(index.describe_index_stats())
|
52 |
+
# put in the name of your pinecone index here
|
53 |
|
54 |
docsearch = Pinecone.from_existing_index(index_name, embeddings)
|
55 |
|
56 |
prompt_template = """
|
57 |
You are a trained bot to guide people about their medical concerns. You will answer the user's query with your knowledge and the context provided.
|
58 |
+
If the question given by the user asks about their personal details, refer to the user's last inputs.
|
59 |
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
|
60 |
+
You have to act as a conversational bot, paying attention to the personal details given in the context, and respond helpfully. Provide personal details if asked.
|
61 |
Use the following pieces of context to answer the user's question.
|
62 |
Context: {context}
|
63 |
Question: {question}
|
|
|
78 |
retrieval_chain = ConversationalRetrievalChain.from_llm(llm=chat,
|
79 |
chain_type="stuff",
|
80 |
retriever=docsearch.as_retriever(
|
81 |
+
search_kwargs={'k': 3}),
|
82 |
return_source_documents=True,
|
83 |
combine_docs_chain_kwargs={"prompt": PROMPT},
|
84 |
memory= memory
|
|
|
104 |
|
105 |
initialize_session_state()
|
106 |
|
107 |
+
st.title("Medical Advisor Chatbot 🇮🇳")
|
108 |
|
109 |
# st.markdown(
|
110 |
# """
|