from llama_cpp import Llama
from huggingface_hub import hf_hub_download  # download model weights from the Hugging Face Hub

print("loading . . .")
# Download the GGML-quantized Vigogne-2-7B-Instruct model from the Hub and load it; n_ctx=2048 gives a larger context window.
llm = Llama(model_path=hf_hub_download(repo_id="TheBloke/Vigogne-2-7B-Instruct-GGML", filename="vigogne-2-7b-instruct.ggmlv3.q4_1.bin"), n_ctx=2048)
print("model is loaded")