Spaces: Running

Commit 0ec9aa9 · Parent(s): 7e89a20

Upload 2 files

- app.py +73 -0
- requirements.txt +0 -0
app.py
ADDED
@@ -0,0 +1,73 @@
import streamlit as st
from streamlit_chat import message
from langchain.llms import CTransformers
from langchain.schema.output_parser import StrOutputParser
from langchain.memory import ConversationBufferMemory
from langchain import PromptTemplate, LLMChain

# Create the LLM; st.cache_resource keeps the quantized model loaded across
# Streamlit reruns instead of reloading it on every interaction.
@st.cache_resource
def load_llm():
    return CTransformers(model="Israr-dawar/psychology_chatbot_quantized_model",
                         model_type="llama",
                         config={'max_new_tokens': 128, 'temperature': 0.01})

llm = load_llm()

def should_finish(next_input):
    """Return True if the next input indicates that the user wants to finish the conversation."""
    return next_input.lower() == "exit"

# Prompt template: {chat_history} is filled in by the memory object,
# {text} by the user's message.
template = """
You are a good psychologist. Here is the conversation so far:
{chat_history}
Please share your thoughts on the following text:
`{text}`
Now, could you please ask a question related to this `{text}`?
"""
prompt = PromptTemplate(template=template, input_variables=["chat_history", "text"])

# The memory must live in session_state: Streamlit reruns this whole script on
# every interaction, so a module-level memory object would be reset each time.
# return_messages=False so the history is injected as a plain string, which is
# what a string PromptTemplate expects.
if 'memory' not in st.session_state:
    st.session_state['memory'] = ConversationBufferMemory(memory_key="chat_history",
                                                          return_messages=False)

# LLM chain wired to the persistent ConversationBufferMemory object
chain = LLMChain(prompt=prompt, llm=llm, memory=st.session_state['memory'],
                 output_parser=StrOutputParser())

st.title("Psychology ChatBot 🧑🏽‍⚕️")

def conversation_chat(query):
    # The memory supplies {chat_history}; only the {text} slot is passed here.
    # predict() returns the parsed string output directly.
    answer = chain.predict(text=query)
    st.session_state['history'].append((query, answer))
    return answer

def initialize_session_state():
    if 'history' not in st.session_state:
        st.session_state['history'] = []

    if 'generated' not in st.session_state:
        st.session_state['generated'] = ["Hello! Ask me anything about 🤗"]

    if 'past' not in st.session_state:
        st.session_state['past'] = ["Hey! 👋"]

def display_chat_history():
    reply_container = st.container()
    container = st.container()

    with container:
        with st.form(key='my_form', clear_on_submit=True):
            user_input = st.text_input("Question:", placeholder="Ask about your mental health", key='input')
            submit_button = st.form_submit_button(label='Send')

        if submit_button and user_input:
            output = conversation_chat(user_input)

            st.session_state['past'].append(user_input)
            st.session_state['generated'].append(output)

    if st.session_state['generated']:
        with reply_container:
            for i in range(len(st.session_state['generated'])):
                message(st.session_state["past"][i], is_user=True, key=str(i) + '_user', avatar_style="thumbs")
                message(st.session_state["generated"][i], key=str(i), avatar_style="fun-emoji")

# Initialize session state
initialize_session_state()
# Display chat history
display_chat_history()
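For a quick smoke test outside the Streamlit UI, the same chain can be driven from a plain Python script. This is a minimal sketch, assuming the model repo downloads cleanly and that langchain and ctransformers are installed; the exit condition mirrors the should_finish helper above:

# Headless sketch of the same chain: no Streamlit, memory held in a local object.
from langchain.llms import CTransformers
from langchain.memory import ConversationBufferMemory
from langchain import PromptTemplate, LLMChain

llm = CTransformers(model="Israr-dawar/psychology_chatbot_quantized_model",
                    model_type="llama",
                    config={'max_new_tokens': 128, 'temperature': 0.01})
prompt = PromptTemplate(
    template="You are a good psychologist.\n{chat_history}\nPlease share your thoughts on: `{text}`",
    input_variables=["chat_history", "text"])
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=False)
chain = LLMChain(prompt=prompt, llm=llm, memory=memory)

while True:
    query = input("You: ")
    if query.lower() == "exit":   # same condition as should_finish()
        break
    print("Bot:", chain.predict(text=query))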
requirements.txt
ADDED
Binary file (2.77 kB)
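requirements.txt is stored as a binary blob, so its exact contents are not visible in this diff. Judging from the imports in app.py, it presumably pins at least streamlit, streamlit-chat, langchain, and ctransformers; with those installed, `streamlit run app.py` should start the Space locally. This is an inference from the code, not from the file itself.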