File size: 5,783 Bytes
988cf58
 
 
 
 
 
 
 
bcf5e23
988cf58
 
 
 
 
 
 
bcf5e23
 
 
988cf58
 
bcf5e23
988cf58
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6407d03
988cf58
 
 
bcf5e23
 
 
 
 
 
 
 
988cf58
 
 
 
dbf186d
3d83cc2
988cf58
3d83cc2
dbf186d
3d83cc2
988cf58
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bcf5e23
988cf58
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bcf5e23
988cf58
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
from dataclasses import dataclass
from typing import Literal
import streamlit as st
import os
# from llamaapi import LlamaAPI
# from langchain_experimental.llms import ChatLlamaAPI
from langchain.embeddings import HuggingFaceEmbeddings
from pinecone import Pinecone

from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
import streamlit.components.v1 as components
from langchain_groq import ChatGroq
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
import time
import os
# SECURITY: the Pinecone API key was previously hard-coded here. A key that
# has been committed to source control must be treated as compromised and
# rotated. Load it from Streamlit secrets, consistent with how the Groq and
# HuggingFace credentials are handled elsewhere in this file.
os.environ['PINECONE_API_KEY'] = st.secrets['PINECONE_API_KEY']
# Pinecone() picks up PINECONE_API_KEY from the environment set above.
pc = Pinecone()

HUGGINGFACEHUB_API_TOKEN = st.secrets['HUGGINGFACEHUB_API_TOKEN']
from langchain.vectorstores import Pinecone
@dataclass
class Message:
    """One entry in the chat transcript rendered by the UI loop below.

    The ``origin`` labels double as the display prefix shown in the
    transcript (``f"{chat.origin} : {chat.message}"``), so they are
    constrained to the two exact strings used by ``on_click_callback``.
    """
    # Who produced the message; value is rendered verbatim in the UI.
    origin: Literal["๐Ÿ‘ค Human", "๐Ÿ‘จ๐Ÿปโ€โš–๏ธ Ai"]
    # Raw message text (user prompt or the chain's 'answer' field).
    message: str


def download_hugging_face_embeddings():
    """Return a MiniLM sentence-transformer embedding model.

    The model weights are downloaded (and cached) by the HuggingFace hub
    on first use; subsequent calls reuse the local cache.
    """
    return HuggingFaceEmbeddings(
        model_name='sentence-transformers/all-MiniLM-L6-v2'
    )


def initialize_session_state():
    """Populate ``st.session_state`` on the first script run.

    Creates two entries (idempotent across Streamlit reruns):

    * ``history`` -- list of ``Message`` objects shown in the transcript.
    * ``conversation`` -- a ``ConversationalRetrievalChain`` wired to a
      Groq-hosted LLM, a Pinecone retriever over the ``medical-advisor``
      index, and a buffer memory keyed for conversational follow-ups.
    """
    if "history" not in st.session_state:
        st.session_state.history = []

    if "conversation" not in st.session_state:
        # LLM backend. NOTE(review): "mixtral-8x7b-32768" availability on
        # Groq should be confirmed -- hosted model names get retired.
        chat = ChatGroq(temperature=0.5, groq_api_key=st.secrets["Groq_api"], model_name="mixtral-8x7b-32768")

        embeddings = download_hugging_face_embeddings()

        index_name = "medical-advisor"

        # Diagnostic only: confirm the index exists and log its stats.
        # ``from_existing_index`` below is what actually binds to it, and
        # it will raise if the index is missing.
        if index_name in pc.list_indexes().names():
            print("index already exists", index_name)
            index = pc.Index(index_name)
            print(index.describe_index_stats())
        else:
            print("index not found -- retrieval setup will fail:", index_name)

        # ``Pinecone`` here is langchain.vectorstores.Pinecone (imported at
        # module level), not the pinecone client class.
        docsearch = Pinecone.from_existing_index(index_name, embeddings)

        prompt_template = """
            You are a trained bot to guide people about their medical concerns acting as Doctor. You will answer user's query with your knowledge and the context provided. 
            
            If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.

            Provide very detailed answer as Doctor.
            
            Use the following pieces of context to answer the users question.
            Context: {context}
            Question: {question}
            Only return the helpful answer below and nothing else.
            Helpful answer:
            """

        PROMPT = PromptTemplate(template=prompt_template, input_variables=["context", "question"])

        # ``output_key="answer"`` tells the memory which field of the
        # chain's multi-key output to store; on_click_callback reads the
        # same key.
        message_history = ChatMessageHistory()
        memory = ConversationBufferMemory(
            memory_key="chat_history",
            output_key="answer",
            chat_memory=message_history,
            return_messages=True,
        )

        retrieval_chain = ConversationalRetrievalChain.from_llm(
            llm=chat,
            chain_type="stuff",
            retriever=docsearch.as_retriever(search_kwargs={'k': 3}),
            return_source_documents=True,
            combine_docs_chain_kwargs={"prompt": PROMPT},
            memory=memory,
        )

        st.session_state.conversation = retrieval_chain


def on_click_callback():
    """Form-submit handler: send the user's prompt to the chain, record both
    sides of the exchange in ``st.session_state.history``, and clear the box.

    Clearing ``human_prompt`` must happen inside the callback -- it is the
    only point where Streamlit allows mutating a widget's state.
    """
    human_prompt = st.session_state.human_prompt
    st.session_state.human_prompt = ""
    # Guard: don't send an empty/whitespace-only submission to the LLM.
    if not human_prompt or not human_prompt.strip():
        return
    response = st.session_state.conversation(
        human_prompt
    )
    # The chain returns a dict; 'answer' matches the memory's output_key.
    llm_response = response['answer']
    st.session_state.history.append(
        Message("๐Ÿ‘ค Human", human_prompt)
    )
    st.session_state.history.append(
        Message("๐Ÿ‘จ๐Ÿปโ€โš–๏ธ Ai", llm_response)
    )


# --- Page body (runs top-to-bottom on every Streamlit rerun) ---
initialize_session_state()

st.title("Medical Advisor Chatbot ๐Ÿ‡ฎ๐Ÿ‡ณ")

# NOTE(review): this welcome banner references "LegalEase Advisor" / Indian
# law, which does not match the medical-advisor purpose above -- likely
# left over from a sister project; confirm before re-enabling.
# st.markdown(
#     """
#     ๐Ÿ‘‹ **Namaste! Welcome to LegalEase Advisor!**
#     I'm here to assist you with your legal queries within the framework of Indian law. Whether you're navigating through specific legal issues or seeking general advice, I'm here to help.
    
#     ๐Ÿ“š **How I Can Assist:**
    
#     - Answer questions on various aspects of Indian law.
#     - Guide you through legal processes relevant to India.
#     - Provide information on your rights and responsibilities as per Indian legal standards.
    
#     โš–๏ธ **Disclaimer:**
    
#     While I can provide general information, it's essential to consult with a qualified Indian attorney for advice tailored to your specific situation.
    
#     ๐Ÿค– **Getting Started:**
    
#     Feel free to ask any legal question related to Indian law, using keywords like "property rights," "labor laws," or "family law." I'm here to assist you!
#     Let's get started! How can I assist you today?
#     """
# )

# Transcript container is created before the input form so messages render
# above the prompt box.
chat_placeholder = st.container()
prompt_placeholder = st.form("chat-form")

# Render the accumulated history (appended to by on_click_callback).
with chat_placeholder:
    for chat in st.session_state.history:
        st.markdown(f"{chat.origin} : {chat.message}")

# Input form: text box (key "human_prompt" is read/cleared by the callback)
# plus a submit button that triggers on_click_callback before the rerun.
with prompt_placeholder:
    st.markdown("**Chat**")
    cols = st.columns((6, 1))
    cols[0].text_input(
        "Chat",
        label_visibility="collapsed",
        key="human_prompt",
    )
    cols[1].form_submit_button(
        "Submit",
        type="primary",
        on_click=on_click_callback,
    )