# "Spaces: Sleeping" — Hugging Face Space status banner captured along with the
# page scrape; not part of the script.
import os

import requests
from llama_cpp import Llama
# Directory where downloaded model files are cached; created up front so the
# later download can write into it.
local_dir = "models"
os.makedirs(local_dir, exist_ok=True)
# Name of the quantized GGUF model file to fetch from the Hub.
filename = "unsloth.Q4_K_M.gguf"
# Full local path the model is downloaded to and loaded from.
model_path = os.path.join(local_dir, filename)
# Helper to fetch a single model file from the Hugging Face Hub.
def download_model(repo_id, filename, save_path):
    """Download `filename` from the given Hub repo to `save_path`.

    Prints a success or failure message; does not raise on HTTP errors
    (matches the original best-effort behavior).
    """
    # The Hub's "resolve" endpoint serves the raw file from the main branch.
    # (The original code had a literal "(unknown)" placeholder here instead
    # of the filename, so it could never download the right file.)
    url = f"https://huggingface.co/{repo_id}/resolve/main/{filename}"
    # Stream the body so a multi-gigabyte GGUF file is never held fully
    # in memory.
    response = requests.get(url, stream=True)
    if response.status_code == 200:
        with open(save_path, 'wb') as f:
            # 1 MiB chunks keep memory bounded while staying fast.
            for chunk in response.iter_content(chunk_size=1 << 20):
                f.write(chunk)
        print(f"Model downloaded to {save_path}")
    else:
        print(f"Failed to download model: {response.status_code}")
# Download the model once; subsequent runs reuse the cached local copy.
if not os.path.exists(model_path):
    download_model("PurpleAILAB/Llama3.2-3B-uncensored-SQLi-Q4_K_M-GGUF", filename, model_path)
# Load the model directly from the local file.  The original code called
# Llama.from_pretrained(repo_id=..., filename=model_path), which treats the
# argument as a filename pattern *inside the Hub repo* and triggers a fresh
# download — defeating the manual download above (and a local path like
# "models/unsloth.Q4_K_M.gguf" does not match any filename in the repo).
llm = Llama(model_path=model_path)
# Run a single-turn chat completion against the loaded model and dump the
# raw response dict for inspection.
user_message = {
    "role": "user",
    "content": "What is the capital of France?",
}
response = llm.create_chat_completion(messages=[user_message])
print(response)