Paul-Joshi committed on
Commit
664b6f4
·
verified ·
1 Parent(s): 54a2185

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -12
app.py CHANGED
@@ -12,6 +12,7 @@ from langchain_core.output_parsers import StrOutputParser
12
  from langchain_core.prompts import ChatPromptTemplate
13
 
14
  from langchain_community.embeddings import HuggingFaceEmbeddings
 
15
 
16
  def method_get_website_text(urls):
17
  # Convert string of URLs to list
@@ -40,32 +41,52 @@ def method_get_vectorstore(document_chunks):
40
  # create a vectorstore from the chunks
41
  vector_store = Chroma.from_documents(document_chunks, embeddings)
42
  return vector_store
43
-
44
def get_context_retriever_chain(vector_store, question):
    """Answer *question* with a RAG pipeline built over *vector_store*.

    Despite the name, this does not return the chain itself: it assembles a
    retrieval-augmented-generation pipeline (retriever -> prompt -> LLM ->
    string parser) and immediately invokes it, returning the model's answer
    as a string.
    """
    # Retriever that pulls relevant chunks out of the vector store.
    doc_retriever = vector_store.as_retriever()

    # Prompt that constrains the model to the retrieved context only.
    # NOTE: the template text must stay byte-identical — it is runtime data.
    after_rag_template = """Answer the question based only on the following context:
    {context}
    Question: {question}
    """
    after_rag_prompt = ChatPromptTemplate.from_template(after_rag_template)

    # Hosted Mistral-7B-Instruct endpoint on the Hugging Face Hub.
    llm = HuggingFaceHub(repo_id="mistralai/Mistral-7B-Instruct-v0.2", model_kwargs={"temperature":0.6, "max_length":512})

    # Compose the LCEL pipeline: fill {context} from the retriever and pass
    # the raw question through, then prompt -> LLM -> plain-string output.
    inputs = {"context": doc_retriever, "question": RunnablePassthrough()}
    after_rag_chain = inputs | after_rag_prompt | llm | StrOutputParser()

    # Run the pipeline once and hand back the generated answer.
    return after_rag_chain.invoke(question)
69
 
70
  def main():
71
  st.set_page_config(page_title="Chat with websites", page_icon="🤖")
 
12
  from langchain_core.prompts import ChatPromptTemplate
13
 
14
  from langchain_community.embeddings import HuggingFaceEmbeddings
15
+ from langchain import hub
16
 
17
  def method_get_website_text(urls):
18
  # Convert string of URLs to list
 
41
  # create a vectorstore from the chunks
42
  vector_store = Chroma.from_documents(document_chunks, embeddings)
43
  return vector_store
44
+
45
def get_context_retriever_chain(vector_store, question):
    """Answer *question* using a RAG pipeline over *vector_store*.

    Builds a retrieval-augmented-generation chain from the community
    "rlm/rag-prompt" template pulled from the LangChain Hub, runs it once,
    and returns the LLM's answer as a string. (Despite the name, the chain
    object itself is not returned.)

    NOTE(review): ``hub.pull`` performs a network fetch on every call —
    consider caching the prompt at module level; confirm with the caller.
    """
    # Turn the vector store into a document retriever.
    doc_retriever = vector_store.as_retriever()

    # Canonical RAG prompt from the LangChain Hub (replaces the old
    # hand-written template). The hub id is runtime data — keep it exact.
    rag_prompt = hub.pull("rlm/rag-prompt")

    # Hosted Mistral-7B-Instruct endpoint on the Hugging Face Hub.
    llm = HuggingFaceHub(repo_id="mistralai/Mistral-7B-Instruct-v0.2", model_kwargs={"temperature":0.6, "max_length":512})

    # LCEL composition: retriever fills {context}, the question passes
    # through unchanged, then prompt -> LLM -> plain-string parser.
    pipeline_inputs = {"context": doc_retriever, "question": RunnablePassthrough()}
    rag_chain = pipeline_inputs | rag_prompt | llm | StrOutputParser()

    # Coerce the question to str (the hub prompt expects text) and invoke.
    return rag_chain.invoke(str(question))
63
+
64
+
65
+ # def get_context_retriever_chain(vector_store,question):
66
+ # # Initialize the retriever
67
+ # retriever = vector_store.as_retriever()
68
+
69
+ # # Define the RAG template
70
+ # after_rag_template = """Answer the question based only on the following context:
71
+ # {context}
72
+ # Question: {question}
73
+ # """
74
+
75
+ # # Create the RAG prompt template
76
+ # after_rag_prompt = ChatPromptTemplate.from_template(after_rag_template)
77
+
78
+ # # Initialize the Hugging Face language model (LLM)
79
+ # llm = HuggingFaceHub(repo_id="mistralai/Mistral-7B-Instruct-v0.2", model_kwargs={"temperature":0.6, "max_length":512})
80
+
81
+ # # Construct the RAG pipeline
82
+ # after_rag_chain = (
83
+ # {"context": retriever, "question": RunnablePassthrough()}
84
+ # | after_rag_prompt
85
+ # | llm
86
+ # | StrOutputParser()
87
+ # )
88
 
89
+ # return after_rag_chain.invoke(question)
90
 
91
  def main():
92
  st.set_page_config(page_title="Chat with websites", page_icon="🤖")