File size: 1,353 Bytes
79929e8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import os
import requests
from llama_cpp import Llama

# Ensure a local cache directory exists for downloaded model weights
local_dir = "models"
os.makedirs(local_dir, exist_ok=True)

# Target GGUF file and the full local path it will be stored at
filename = "unsloth.Q4_K_M.gguf"
model_path = os.path.join(local_dir, filename)

# Function to download the model file
def download_model(repo_id, filename, save_path):
    """Download a single file from a Hugging Face repo to a local path.

    Args:
        repo_id: Hub repository id, e.g. "owner/repo-name".
        filename: Name of the file within the repo's main branch.
        save_path: Local filesystem path to write the downloaded bytes to.

    Prints a success or failure message; does not raise on HTTP errors.
    """
    # Construct the URL for the model file.
    # BUG FIX: the original interpolated a literal "(unknown)" placeholder
    # instead of the requested filename, so the download could never succeed.
    url = f"https://huggingface.co/{repo_id}/resolve/main/{filename}"

    # Stream the download so multi-GB GGUF files are written in chunks
    # rather than buffered entirely in memory; timeout avoids hanging forever.
    response = requests.get(url, stream=True, timeout=60)
    if response.status_code == 200:
        with open(save_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=1 << 20):
                f.write(chunk)
        print(f"Model downloaded to {save_path}")
    else:
        print(f"Failed to download model: {response.status_code}")

# Fetch the weights only when no cached copy exists on disk
if not os.path.exists(model_path):
    download_model(
        "PurpleAILAB/Llama3.2-3B-uncensored-SQLi-Q4_K_M-GGUF",
        filename,
        model_path,
    )

# Load the model from the local file downloaded above.
# BUG FIX: the original called Llama.from_pretrained(), which downloads from
# the Hub again and expects `filename` to be a filename pattern *inside the
# repo* — passing a local path there is incorrect and makes the manual
# download above pointless. The plain Llama() constructor loads the local
# GGUF file directly.
llm = Llama(model_path=model_path)

# Ask the model a question through the chat-completion API
user_turn = {
    "role": "user",
    "content": "What is the capital of France?",
}
response = llm.create_chat_completion(messages=[user_turn])

# Dump the raw completion payload
print(response)