Paul-Joshi committed on
Commit 0e981b6 · verified · 1 Parent(s): ac0a4b5

Update app.py

Files changed (1)
  1. app.py +29 -2
app.py CHANGED
@@ -37,7 +37,7 @@ def method_get_vectorstore(document_chunks):
     vector_store = Chroma.from_documents(document_chunks, embeddings)
     return vector_store
 
-def get_context_retriever_chain(vector_store,question):
+def get_context_retriever_chain(vector_store, question):
     # Initialize the retriever
     retriever = vector_store.as_retriever()
 
@@ -55,13 +55,40 @@ def get_context_retriever_chain(vector_store,question):
 
     # Construct the RAG pipeline
     after_rag_chain = (
-        {"context": retriever, "question": RunnablePassthrough()}
+        {"context": retriever, "question": question}
         | after_rag_prompt
         | llm
         | StrOutputParser()
     )
 
+    # Invoke the RAG pipeline and return the generated answer
     return after_rag_chain.invoke(question)
+
+# def get_context_retriever_chain(vector_store,question):
+#     # Initialize the retriever
+#     retriever = vector_store.as_retriever()
+#
+#     # Define the RAG template
+#     after_rag_template = """Answer the question based only on the following context:
+#     {context}
+#     Question: {question}
+#     """
+#
+#     # Create the RAG prompt template
+#     after_rag_prompt = ChatPromptTemplate.from_template(after_rag_template)
+#
+#     # Initialize the Hugging Face language model (LLM)
+#     llm = HuggingFaceHub(repo_id="mistralai/Mistral-7B-Instruct-v0.2")
+#
+#     # Construct the RAG pipeline
+#     after_rag_chain = (
+#         {"context": retriever, "question": RunnablePassthrough()}
+#         | after_rag_prompt
+#         | llm
+#         | StrOutputParser()
+#     )
+#
+#     return after_rag_chain.invoke(question)
 
 def main():
     st.set_page_config(page_title="Chat with websites", page_icon="🤖")
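
For readers reconstructing the data flow: the chain fans the incoming question out to the prompt's {context} and {question} slots via an LCEL dict mapping. Below is a minimal, self-contained sketch of that pattern using the RunnablePassthrough form kept in the commented-out block above; the retriever and LLM are replaced by hypothetical stand-in functions (not part of app.py) so the snippet runs with langchain-core alone, without Chroma or a Hugging Face token.

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough


def fake_retriever(question: str) -> str:
    """Hypothetical stand-in for the Chroma retriever used in app.py."""
    return f"<documents retrieved for: {question}>"


def fake_llm(prompt_value) -> str:
    """Hypothetical stand-in for the HuggingFaceHub LLM used in app.py."""
    return "stub answer grounded in -> " + prompt_value.to_string()


after_rag_prompt = ChatPromptTemplate.from_template(
    "Answer the question based only on the following context:\n{context}\nQuestion: {question}\n"
)

# Same LCEL shape as get_context_retriever_chain: the dict runs its values in
# parallel on the chain input, then feeds the resulting mapping into the prompt.
after_rag_chain = (
    {"context": fake_retriever, "question": RunnablePassthrough()}
    | after_rag_prompt
    | fake_llm
    | StrOutputParser()
)

print(after_rag_chain.invoke("What is this page about?"))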