Waflon committed on
Commit
dca217d
1 Parent(s): 46d6005

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +102 -1
app.py CHANGED
@@ -1 +1,102 @@
1
- print("hello")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from streamlit_chat import message
3
+ from langchain.chains import ConversationalRetrievalChain
4
+ from langchain.document_loaders import PyPDFLoader, DirectoryLoader
5
+ from langchain.embeddings import HuggingFaceEmbeddings
6
+ from langchain.llms import CTransformers
7
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
8
+ from langchain.vectorstores import FAISS
9
+ from langchain.memory import ConversationBufferMemory
10
+
11
# Function to load documents
def load_documents(path='data/', glob="*.pdf"):
    """Load documents from a directory.

    Args:
        path: Directory to scan for files (default: 'data/', matching the
            original hard-coded location so existing callers are unaffected).
        glob: Filename pattern to match (default: '*.pdf').

    Returns:
        A list of LangChain Document objects, one or more per matched file.
    """
    loader = DirectoryLoader(path, glob=glob, loader_cls=PyPDFLoader)
    documents = loader.load()
    return documents
16
+
17
# Function to split text into chunks
def split_text_into_chunks(documents, chunk_size=500, chunk_overlap=50):
    """Split documents into overlapping text chunks for embedding.

    Args:
        documents: Iterable of LangChain Document objects.
        chunk_size: Maximum characters per chunk (default 500, as before).
        chunk_overlap: Characters shared between adjacent chunks (default 50).

    Returns:
        A list of chunked Document objects.
    """
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size,
                                                   chunk_overlap=chunk_overlap)
    text_chunks = text_splitter.split_documents(documents)
    return text_chunks
22
+
23
# Function to create embeddings
def create_embeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
                      device="cpu"):
    """Create a HuggingFace sentence-embedding model.

    Args:
        model_name: HuggingFace model id (default: all-MiniLM-L6-v2, as before).
        device: Torch device string passed through to the model (default "cpu").

    Returns:
        A HuggingFaceEmbeddings instance.
    """
    embeddings = HuggingFaceEmbeddings(model_name=model_name,
                                       model_kwargs={'device': device})
    return embeddings
27
+
28
# Function to create vector store
def create_vector_store(text_chunks, embeddings):
    """Build a FAISS vector store over *text_chunks* using *embeddings*."""
    return FAISS.from_documents(text_chunks, embeddings)
32
+
33
# Function to create LLMS model
def create_llms_model(max_new_tokens=128, temperature=0.01):
    """Load the local quantized Mistral model via CTransformers.

    Args:
        max_new_tokens: Generation length cap (default 128, as before).
        temperature: Sampling temperature (default 0.01, near-deterministic).

    Returns:
        A CTransformers LLM instance.

    NOTE(review): the GGUF file path is relative to the working directory —
    presumably shipped alongside the app; verify it exists at deploy time.
    """
    llm = CTransformers(model="mistral-7b-instruct-v0.1.Q4_K_M.gguf",
                        config={'max_new_tokens': max_new_tokens,
                                'temperature': temperature})
    return llm
37
+
38
# Initialize Streamlit app
# Page chrome: two titles, a subheader, and inline CSS overrides. The CSS
# snippets are hoisted to named constants; call order (and therefore the
# rendered page) is identical to the original.
_H1_CSS = '<style>h1{color: orange; text-align: center;}</style>'
_H3_CSS = '<style>h3{color: pink; text-align: center;}</style>'

st.title("Job Interview Prep ChatBot")
st.title("Personalized Job Success Friend")
st.markdown(_H1_CSS, unsafe_allow_html=True)
st.subheader('Get Your Desired Job 💪')
st.markdown(_H3_CSS, unsafe_allow_html=True)
44
+
45
# Build the heavy RAG resources once per browser session. Streamlit re-runs
# this whole script on every widget interaction; previously that re-loaded
# the PDFs, re-embedded them, rebuilt the FAISS index and re-loaded the LLM
# on every keystroke/submit. Caching the finished chain in session_state
# fixes that without changing what `chain` is for the rest of the script.
if 'chain' not in st.session_state:
    # loading of documents
    documents = load_documents()
    # Split text into chunks
    text_chunks = split_text_into_chunks(documents)
    # Create embeddings
    embeddings = create_embeddings()
    # Create vector store
    vector_store = create_vector_store(text_chunks, embeddings)
    # Create LLMS model
    llm = create_llms_model()
    # Create memory (lives as long as the cached chain, so conversational
    # context now survives reruns instead of being reset each interaction)
    memory = ConversationBufferMemory(memory_key="chat_history",
                                      return_messages=True)
    # Create chain
    st.session_state['chain'] = ConversationalRetrievalChain.from_llm(
        llm=llm, chain_type='stuff',
        retriever=vector_store.as_retriever(search_kwargs={"k": 2}),
        memory=memory)

# Module-level alias used by conversation_chat()
chain = st.session_state['chain']

# Initialize conversation history
if 'history' not in st.session_state:
    st.session_state['history'] = []

if 'generated' not in st.session_state:
    st.session_state['generated'] = ["Hello! Ask me anything about 🤗"]

if 'past' not in st.session_state:
    st.session_state['past'] = ["Hey! 👋"]
77
+
78
# Define chat function
def conversation_chat(query):
    """Run *query* through the retrieval chain and return the answer.

    The (question, answer) pair is appended to the session's history list,
    which is also passed to the chain as explicit chat context.
    """
    response = chain({"question": query,
                      "chat_history": st.session_state['history']})
    answer = response["answer"]
    st.session_state['history'].append((query, answer))
    return answer
83
+
84
# Display chat history
reply_container = st.container()
container = st.container()

with container:
    # Input form; clear_on_submit empties the text box after each send.
    with st.form(key='my_form', clear_on_submit=True):
        user_input = st.text_input("Question:",
                                   placeholder="Ask about your Job Interview",
                                   key='input')
        submit_button = st.form_submit_button(label='Send')

    if submit_button and user_input:
        output = conversation_chat(user_input)
        st.session_state['past'].append(user_input)
        st.session_state['generated'].append(output)

if st.session_state['generated']:
    with reply_container:
        # Walk the two parallel message lists in lockstep. zip() replaces the
        # old range(len(...)) indexing and also guards against an IndexError
        # if the lists ever differ in length.
        for i, (user_msg, bot_msg) in enumerate(
                zip(st.session_state["past"], st.session_state["generated"])):
            message(user_msg, is_user=True, key=str(i) + '_user',
                    avatar_style="thumbs")
            message(bot_msg, key=str(i), avatar_style="fun-emoji")