from langchain.llms import LlamaCpp
from langchain import PromptTemplate, LLMChain
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
# Sample English query
en_Pr = """
who is salma
"""
# Arabic sample contexts: "Salma is an engineer", "Salma is a student in the Faculty of
# Computers and Information", "Salma is an employee in the AI department"
context = ["في سلمى.pdf: سلمى هي مهندسة", "في سلمى.pdf: سلمى هي طالبة في كلية حاسبات ومعلومات", "في اركلياب.pdf: سلمى هي موظفة في قسم الذكاء الاصطناعي"]
en_Cont = ['in salma_ahmed.pdf: salma is a computer developer', 'in salmaaaaa3333.pdf: salma is an employee in arkleap', 'in salmaaaaa3333.pdf: salma works from 9 to 5', 'in italy_emploee.pdf: salma is an agent who works as a spy', 'in zolompa7.pdf:']
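# The entries follow the "in [pdfname]: [answer]" format that the prompt template below
# assumes. A sketch of how such a list could be built from per-PDF answers (pdf_answers is
# a hypothetical mapping used only for illustration):
# pdf_answers = {"salma_ahmed.pdf": "salma is a computer developer"}
# en_Cont = [f"in {name}: {answer}" for name, answer in pdf_answers.items()]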
# Alternative prompt template, kept for reference:
# template = """You are given a context of answers to a question over multiple PDFs, in the format "in [pdfname]: [answer of the query in the pdf]".
# Answer the following question with reasoned answers from the contexts below, given as a list with one entry per PDF name and all possible answers for it. Don't mix the answers of different PDFs together; only give answers for each PDF individually.
# Don't generate an answer from your own data; generate only from the provided contexts.
# Answer only from the provided data; when there is an answer, include the reasoning.
# If the contexts don't provide an answer or aren't related to the question, respond only with "there is no answer for the provided question".
# If the question is in one language and the context is in another but there is an answer, translate and generate the answer in both languages.
# question: {question}
# context: {context}
# answer:
# """
def llama_local(query, context):
    system_prompt = """You are a helpful assistant; you will use the provided context to answer user questions.
    Read the given context before answering and think step by step. If you cannot answer a user question based on
    the provided context, inform the user. Do not use any other information to answer. Provide a detailed answer to the question."""
    # Llama-2 chat formatting tokens
    B_INST, E_INST = "[INST]", "[/INST]"
    B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
    SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS
    instruction = """
    Context: {context}
    User: {question}"""
    prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
    prompt = PromptTemplate(template=prompt_template, input_variables=["question", "context"])
    # Stream tokens to stdout as they are generated
    callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
    llm = LlamaCpp(
        model_path="C:\\Users\\zeyad\\Desktop\\pythonProject3\\trainmodel\\llama-13B-Q4_K_M.gguf",
        callback_manager=callback_manager,
        verbose=True,
        temperature=0,
        top_p=1,
    )
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    return llm_chain.run(question=query, context=context)
# Example call (the model is loaded inside llama_local):
# llama_local(en_Pr, en_Cont)
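# A minimal runnable sketch, assuming the GGUF model path used in llama_local exists on this
# machine. Joining the snippets into one newline-separated string keeps the prompt readable;
# the Arabic `context` list can be passed the same way to exercise the cross-language case
# mentioned in the alternative template.
if __name__ == "__main__":
    print(llama_local(en_Pr, "\n".join(en_Cont)))
    # print(llama_local(en_Pr, "\n".join(context)))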