Upload app.py
app.py CHANGED
@@ -8,7 +8,6 @@ import requests
 import logging
 import subprocess
 from llama_cpp import Llama  # Import Llama from llama_cpp
-import spacy
 
 # Configure logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
@@ -21,21 +20,9 @@ def install_requirements():
     except subprocess.CalledProcessError as e:
         logging.error(f"Failed to install requirements: {e}")
 
-# Function to download the spaCy model
-def download_spacy_model(model_name):
-    try:
-        subprocess.check_call([os.sys.executable, '-m', 'spacy', 'download', model_name])
-        logging.info(f"SpaCy model {model_name} downloaded successfully.")
-    except subprocess.CalledProcessError as e:
-        logging.error(f"Failed to download SpaCy model {model_name}: {e}")
-
 # Install requirements
 install_requirements()
 
-# Download the spaCy model if it doesn't exist
-if not spacy.util.is_package('en_core_web_lg'):
-    download_spacy_model('en_core_web_lg')
-
 # Create the directory if it doesn't exist
 local_dir = "models"
 os.makedirs(local_dir, exist_ok=True)
@@ -60,13 +47,7 @@ def download_model(repo_id, filename, save_path):
 
 # Download the model if it doesn't exist
 if not os.path.exists(model_path):
-    download_model("
-
-# Function to truncate context to fit within the model's context window
-def truncate_context(context, max_tokens):
-    words = context.split()
-    truncated_context = ' '.join(words[-max_tokens:])
-    return truncated_context
+    download_model("PurpleAILAB/Llama3.2-3B-uncensored-SQLi-Q4_K_M-GGUF", filename, model_path)
 
 def respond(
     message,
@@ -86,24 +67,14 @@ def respond(
     # Load the model with the maximum context length and control the maximum tokens in the response
     llm = Llama(
         model_path=model_path,
-        n_ctx=
-        max_tokens=
+        n_ctx=5000,  # Set the maximum context length
+        max_tokens=512  # Control the maximum number of tokens generated in the response
     )
 
     agent = Agent(llm, db_path, system_prompt)
     user_id = str(uuid.uuid4())  # Generate a unique user ID for each session
 
-
-    # Truncate the context to fit within the model's context window
-    max_context_tokens = 500  # Adjust this based on your model's context window
-    context = f"{system_prompt}\nUser: {message}\nAssistant: "
-    truncated_context = truncate_context(context, max_context_tokens)
-
-    response = agent.process_query(user_id, message, truncated_context)
-    except ValueError as e:
-        logging.error(f"Error during processing: {e}")
-        response = "Désolé, il y a eu une erreur lors du traitement de votre requête. Veuillez essayer à nouveau."
-
+    response = agent.process_query(user_id, message)
     return response
 
 """
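Notes on this change:

1. The spaCy bootstrap is gone: the import, the download_spacy_model helper, and the en_core_web_lg check are all removed. The old helper shelled out to the spaCy CLI via os.sys.executable, a spelling that only works because the os module happens to import sys internally; sys.executable is the conventional form. Should the behavior ever need to be restored, spaCy exposes the download as a function, so no subprocess is needed. A minimal sketch, assuming spacy is installed:

    import logging

    import spacy

    def ensure_spacy_model(model_name):
        # Download the model only when it is not already installed.
        if not spacy.util.is_package(model_name):
            logging.info(f"Downloading spaCy model {model_name}...")
            spacy.cli.download(model_name)

    ensure_spacy_model("en_core_web_lg")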
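2. The download call-site now points at the PurpleAILAB/Llama3.2-3B-uncensored-SQLi-Q4_K_M-GGUF repo. The body of download_model(repo_id, filename, save_path) sits outside this diff, so the following is only a hypothetical sketch of such a helper built on the real huggingface_hub API; the copy-to-save_path behavior is an assumption, not the app's actual code:

    import logging
    import shutil

    from huggingface_hub import hf_hub_download

    def download_model(repo_id, filename, save_path):
        # hf_hub_download fetches one file into the local Hugging Face
        # cache and returns the cached path; copy it where the app expects it.
        cached_path = hf_hub_download(repo_id=repo_id, filename=filename)
        shutil.copyfile(cached_path, save_path)
        logging.info(f"Downloaded {filename} from {repo_id} to {save_path}")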
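3. One caveat on the new constructor arguments: in llama-cpp-python, n_ctx is a genuine Llama() parameter, but max_tokens is a sampling option that belongs to the completion call (create_completion, or its __call__ shorthand), not to the constructor; depending on the installed version, an unexpected constructor keyword is either ignored with a warning or rejected outright. A minimal sketch of the usual split, with a hypothetical model path:

    from llama_cpp import Llama

    llm = Llama(
        model_path="models/model.gguf",  # hypothetical path
        n_ctx=5000,  # maximum context window, in tokens
    )

    # max_tokens caps generation for this one completion call.
    output = llm("User: Hello\nAssistant: ", max_tokens=512)
    print(output["choices"][0]["text"])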
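4. The removed truncate_context kept the last max_tokens whitespace-separated words, which only approximates a token budget, since one word can map to several tokens. The commit drops truncation entirely and passes the raw message to agent.process_query, relying on n_ctx=5000 to hold the prompt. If truncation is ever reintroduced, counting with the model's own tokenizer is more faithful; a minimal sketch, assuming an already-constructed llama_cpp.Llama instance named llm:

    def truncate_context(llm, context, max_tokens):
        # Tokenize with the model's own tokenizer and keep only the
        # most recent max_tokens tokens, then decode back to text.
        tokens = llm.tokenize(context.encode("utf-8"), add_bos=False)
        return llm.detokenize(tokens[-max_tokens:]).decode("utf-8", errors="ignore")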