HTAR5 committed on
Commit 1d1a2f4
1 Parent(s): 58cbf26

Upload app.py

Files changed (1)
  1. app.py +100 -0
app.py ADDED
@@ -0,0 +1,100 @@
+ from llama_cpp import Llama
+ import streamlit as st
+ from langchain.llms.base import LLM
+ from llama_index import LLMPredictor, LangchainEmbedding, ServiceContext, PromptHelper
+ from typing import Optional, List, Mapping, Any
+ from langchain.embeddings.huggingface import HuggingFaceEmbeddings
+ import pandas as pd
+
+ # Set the page config as the first Streamlit command
+ st.set_page_config(page_title='Mental Health Chatbot', page_icon=':robot_face:', layout='wide')
+
+ # Define constants (raw string avoids backslash escapes in the Windows path)
+ MODEL_NAME = 'mellogpt.Q3_K_S.gguf'
+ MODEL_PATH = r'D:\python\Mantal Health 1\python\chatbot\mellogpt.Q3_K_S.gguf'
+ KNOWLEDGE_BASE_FILE = "mentalhealth.csv"
+
+ # Configuration
+ NUM_THREADS = 8
+ MAX_INPUT_SIZE = 2048
+ NUM_OUTPUT = 256
+ CHUNK_OVERLAP_RATIO = 0.10
+
+ # Initialize the prompt helper, falling back to a larger overlap ratio on failure
+ try:
+     prompt_helper = PromptHelper(MAX_INPUT_SIZE, NUM_OUTPUT, CHUNK_OVERLAP_RATIO)
+ except Exception:
+     CHUNK_OVERLAP_RATIO = 0.2
+     prompt_helper = PromptHelper(MAX_INPUT_SIZE, NUM_OUTPUT, CHUNK_OVERLAP_RATIO)
+
+ embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
+
+ class CustomLLM(LLM):
+     model_name: str = MODEL_NAME
+
+     def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
+         p = f"Human: {prompt} Assistant: "
+         prompt_length = len(p)
+         # The model is reloaded on every call; caching the Llama instance would be faster.
+         llm = Llama(model_path=MODEL_PATH, n_threads=NUM_THREADS)
+         try:
+             output = llm(p, max_tokens=512, stop=["Human:"], echo=True)['choices'][0]['text']
+             # echo=True returns the prompt as well, so slice it off the front
+             response = output[prompt_length:]
+             st.session_state.messages.append({"role": "user", "content": prompt})
+             st.session_state.messages.append({"role": "assistant", "content": response})
+             return response
+         except Exception:
+             st.error("An error occurred while processing your request. Please try again.")
+             return ""
+
+     @property
+     def _identifying_params(self) -> Mapping[str, Any]:
+         return {"name_of_model": self.model_name}
+
+     @property
+     def _llm_type(self) -> str:
+         return "custom"
+
+ # Cache loaders using the current Streamlit caching APIs
+ @st.cache_resource
+ def load_model():
+     return CustomLLM()
+
+ @st.cache_data
+ def load_knowledge_base():
+     df = pd.read_csv(KNOWLEDGE_BASE_FILE)
+     return dict(zip(df['Questions'].str.lower(), df['Answers']))
+
+ def clear_convo():
+     st.session_state['messages'] = []
+
+ def init():
+     if 'messages' not in st.session_state:
+         st.session_state['messages'] = []
+
+ # Main entry point
+ if __name__ == '__main__':
+     init()
+     knowledge_base = load_knowledge_base()
+     llm = load_model()
+
+     clear_button = st.sidebar.button("Clear Conversation")
+     if clear_button:
+         clear_convo()
+
+     user_input = st.text_input("Enter your query:", key="user_input")
+     if user_input:
+         user_input = user_input.lower()
+         answer = knowledge_base.get(user_input)
+         if answer:
+             st.session_state.messages.append({"role": "user", "content": user_input})
+             st.session_state.messages.append({"role": "assistant", "content": answer})
+         else:
+             # Fall back to the local model when the CSV has no exact match
+             llm._call(prompt=user_input)
+
+     for message in st.session_state.messages:
+         with st.container():
+             st.markdown(f"**{message['role'].title()}**: {message['content']}")
+
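For reference, load_knowledge_base() expects mentalhealth.csv to provide 'Questions' and 'Answers' columns, with questions matched case-insensitively against the lowercased user input. Below is a minimal sketch of building such a file with pandas; the two question/answer rows are illustrative placeholders, not part of this commit.

    import pandas as pd

    # Illustrative knowledge base with the two columns load_knowledge_base() reads.
    # The rows are placeholders, not content from this commit.
    df = pd.DataFrame({
        "Questions": ["What is anxiety?", "How can I improve my sleep?"],
        "Answers": [
            "Anxiety is a feeling of worry or unease; persistent anxiety may warrant professional support.",
            "A consistent schedule and limiting screens before bed can help improve sleep.",
        ],
    })
    df.to_csv("mentalhealth.csv", index=False)

    # The app lowercases the question column, so lookups are case-insensitive:
    kb = dict(zip(df["Questions"].str.lower(), df["Answers"]))
    assert kb.get("what is anxiety?") is not None

With this CSV and the GGUF model file in place, the app starts with "streamlit run app.py"; queries that miss the knowledge base fall through to the local llama.cpp model.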