from llama_cpp import Llama  # the class is "Llama" (capital L), not "llama"
from huggingface_hub import hf_hub_download

# Download the GGML model from the Hugging Face Hub and load it.
print("loading . . .")
llm = Llama(
    model_path=hf_hub_download(
        repo_id="TheBloke/Vigogne-2-7B-Instruct-GGML",
        filename="vigogne-2-7b-instruct.ggmlv3.q4_1.bin",
    ),
    n_ctx=2048,  # n_ctx=2048 gives a larger context length
)
print("model is loaded")
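
# A minimal usage sketch once the model is loaded. The prompt below is a
# hypothetical example (Vigogne is a French instruction-tuned model, so a
# French prompt is typical; it asks "What is machine learning?"). In
# llama-cpp-python the Llama object is callable and returns an
# OpenAI-style completion dict.
prompt = "Question: Qu'est-ce que l'apprentissage automatique ? Réponse:"
output = llm(
    prompt,
    max_tokens=128,      # cap the length of the generated answer
    stop=["Question:"],  # stop before the model invents a new question
    echo=False,          # return only the completion, not the prompt
)
print(output["choices"][0]["text"])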