from langchain_ollama import OllamaLLM
import similarity  # local module that retrieves documents relevant to a query
from langchain.chains.question_answering import load_qa_chain
from langchain_core.prompts import PromptTemplate
# Initialize the Ollama model (requires a local Ollama server with the
# llama3.2 model pulled, e.g. `ollama pull llama3.2`).
llm = OllamaLLM(model="llama3.2")
def ask_llms(query_text):
    """Answer query_text via retrieval-augmented QA over the most similar documents."""
    # Retrieve the documents most relevant to the query.
    similar_docs = similarity.get_similar_docs(query_text)
    # Prompt that restricts the model to answering from the retrieved context.
    # (The stray `""",` entries in the draft opened an unintended triple-quoted
    # string that leaked literal quotes and commas into the prompt; replaced
    # with empty strings so they render as blank separator lines.)
    qna_template = '\n'.join([
        "Answer the following question using the context provided.",
        "If the answer is not included in the context, say:",
        "No answer available",
        "### Context:",
        "{context}",
        "",
        "### Question:",
        "{question}",
        "",
        "### Answer:",
    ])
    qna_prompt = PromptTemplate(
        template=qna_template,
        input_variables=['context', 'question'],
    )
    # "stuff" chain: concatenates all retrieved documents into a single prompt.
    # verbose lives on the chain, not the prompt, so logging happens at run time.
    stuff_chain = load_qa_chain(llm, chain_type="stuff", prompt=qna_prompt, verbose=True)
    final_answer = stuff_chain.invoke({
        "input_documents": similar_docs,
        "question": query_text
    })
    return final_answer['output_text']
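
# Minimal usage sketch. Assumes an Ollama server with llama3.2 is running
# locally and that similarity.get_similar_docs returns LangChain Document
# objects; the sample query is the one from the draft code above.
if __name__ == "__main__":
    query_text = "ما فضل صلاة العصر؟"  # "What is the virtue of the Asr prayer?"
    print(f"Query : {query_text}")
    print(f"Answer: {ask_llms(query_text)}")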