File size: 5,445 Bytes
23f2740
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3552dca
 
23f2740
 
 
3552dca
 
 
 
23f2740
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9bdeca3
23f2740
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9bdeca3
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
import streamlit as st
import pandas as pd
from fuzzywuzzy import process
from langchain_community.llms import LlamaCpp
from langchain_core.callbacks import StreamingStdOutCallbackHandler
from langchain_core.prompts import PromptTemplate

# Source data for the app: train.csv holds the question/context pairs used
# for fuzzy medicine lookup; location.csv lists Kendra centres and is a
# legacy Windows-1252 export, hence the explicit encoding.
df2 = pd.read_csv('train.csv')
df = pd.read_csv('location.csv', encoding='Windows-1252')

# Configure and create the local LlamaCpp model used by the Assistant page.
_llm_settings = {
    "model_path": "unsloth.Q5_K_M.gguf",
    "temperature": 0.01,          # near-deterministic decoding
    "max_tokens": 500,
    # NOTE(review): top_p is conventionally in (0, 1]; a value of 3 most
    # likely disables nucleus filtering entirely — confirm this is intended.
    "top_p": 3,
    "callbacks": [StreamingStdOutCallbackHandler()],  # stream tokens to stdout
    "verbose": False,
    "stop": ["###"],              # stop at the next Alpaca-style section marker
}
llm = LlamaCpp(**_llm_settings)

# Alpaca-style prompt layout expected by the fine-tuned model. The literal
# text must match the fine-tune format exactly, so it is kept verbatim.
template = """Below is an instruction that describes a task, paired with an input that provides further context. Write a lengthy detailed response that appropriately completes the request.

### Instruction:
{instruction}

### Input:
{input}

### Response:
{response}"""

# Build the template with the placeholder names spelled out explicitly.
prompt = PromptTemplate(
    template=template,
    input_variables=["instruction", "input", "response"],
)


# Function to find the best matching context based on user input
def find_best_match(query):
    """Fuzzy-match *query* against the questions in df2 and return the
    context paired with the closest question, or a fallback message when
    no match can be made."""
    candidates = df2['Question'].tolist()

    match = process.extractOne(query, candidates)
    if not match:
        return "No relevant information found."

    # extractOne yields (matched_string, score); map the string back to the
    # first row it appears in, mirroring a list .index() lookup.
    position = candidates.index(match[0])
    return df2['Context'].tolist()[position]


# Function to truncate response at the nearest full stop
def truncate_at_full_stop(text, max_length=500):
    """Trim *text* to at most *max_length* characters, preferring to cut at
    the last full stop inside the limit.

    Args:
        text: The string to truncate.
        max_length: Maximum number of characters to keep (default 500).

    Returns:
        *text* unchanged when it already fits; otherwise the longest prefix
        ending at a '.' within *max_length* characters, or the hard
        *max_length*-character prefix when no full stop is present.
    """
    if len(text) <= max_length:
        return text

    truncated = text[:max_length]

    # Debug print() calls removed: they wrote to stdout on every call and
    # interleaved with the model's streaming output.
    last_period = truncated.rfind('.')
    if last_period != -1:
        # Keep the sentence-ending period itself.
        return truncated[:last_period + 1]

    return truncated


# Initialize session state for selected service, chat history, and AI history.
# Streamlit re-runs the script on every interaction, so each key is only
# seeded when it is not already present.
_SESSION_DEFAULTS = {
    'selected_service': "Home",   # which page the main area renders
    'chat_history': [],           # Medicine Services conversation log
    'history': [],                # Assistant (LLM) conversation log
    'input': '',                  # shared text_input widget value
}
for _key, _default in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default

# Sidebar for selecting services
with st.sidebar:
    st.title("Select the Service")

    # One button per service; a click stores the choice that the main
    # content area switches on.
    for _service in ("Medicine Services", "Kendra Locator", "Assistant"):
        if st.button(_service):
            st.session_state.selected_service = _service

# Main content area based on selected service
#
# Streamlit re-runs this script top-to-bottom on every interaction, so this
# chain re-renders the page for whichever service is stored in session state.
# Both the "Medicine Services" and "Assistant" branches define a local
# handle_input() callback and reuse the same widget key="input"; only one
# branch executes per rerun, so the two definitions never coexist.
if st.session_state.selected_service == "Home":
    st.title("Welcome to Medical Service Center")
    st.write("Explore the options in the sidebar to get started.")

elif st.session_state.selected_service == "Medicine Services":
    st.title("Medicine Services")

    # Display chat history
    for chat in st.session_state.chat_history:
        st.write(f"**User:** {chat['user']}")
        st.write(f"**Bot:** {chat['bot']}")

    # User input section
    def handle_input():
        # on_change callback: runs before the rerun, which is why resetting
        # st.session_state['input'] here is permitted.
        user_input = st.session_state['input']
        if user_input:
            # Fuzzy lookup against the train.csv question/context pairs.
            response = find_best_match(user_input)
            st.session_state.chat_history.append({"user": user_input, "bot": response})
            st.session_state['input'] = ''

    # Persistent text input at the top
    st.text_input("Enter medicine:", key="input", on_change=handle_input)

elif st.session_state.selected_service == "Kendra Locator":
    st.title("Kendra Locator")
    display_option = st.selectbox("Select:", ["Address", "Email"])
    pin_code_input = st.text_input("Enter Pin Code:")

    if st.button("Locate"):
        if pin_code_input:
            # Compare pin codes as strings so leading zeros / dtype of the
            # 'Pin' column don't matter. Exact match only — no fuzziness.
            result = df[df['Pin'].astype(str) == pin_code_input]
            if not result.empty:
                # NOTE(review): only the first matching row is shown;
                # assumes pin codes are unique in location.csv — confirm.
                if display_option == "Address":
                    st.write(f"Address: {result['Address'].values[0]}")
                elif display_option == "Email":
                    st.write(f"Email: {result['Email'].values[0]}")
            else:
                st.write("No results found.")
        else:
            st.write("Please enter a pin code.")

elif st.session_state.selected_service == "Assistant":
    st.title("Query Assistance")

    # Display AI chat history
    for chat in st.session_state.history:
        st.write(f"**User Query:** {chat['user']}")
        st.write(f"**Chatbot:** {chat['bot']}")

    # Function to handle user input
    def handle_input():
        user_input = st.session_state['input']
        if user_input:
            # Format the prompt
            formatted_prompt = prompt.format(
                instruction="You are an all-knowing Medical AI. Provide detailed responses to only medicine-related queries.",
                input=user_input,
                response=""  # Leave this blank for generation!
            )

            # Generate response (blocks the rerun until the model finishes;
            # tokens also stream to stdout via the callback handler).
            response = llm.invoke(formatted_prompt)

            # Truncate response if necessary
            truncated_response = truncate_at_full_stop(response)

            # Update the chat history
            st.session_state.history.append({"user": user_input, "bot": truncated_response})

            # Clear the input box
            st.session_state['input'] = ''

    # Persistent text input at the top
    st.text_input("Enter Query:", key="input", on_change=handle_input)