userlocallm committed
Commit 793aa71 · verified · 1 Parent(s): 79929e8

Upload 2 files

Files changed (2)
  1. app.py +19 -3
  2. main.py +78 -0
app.py CHANGED
@@ -60,7 +60,13 @@ def download_model(repo_id, filename, save_path):
 
 # Download the model if it doesn't exist
 if not os.path.exists(model_path):
-    download_model("PurpleAILAB/Llama3.2-3B-uncensored-SQLi-Q4_K_M-GGUF", filename, model_path)
+    download_model("adeptusnull/llama3.2-1b-wizardml-vicuna-uncensored-finetune-test", filename, model_path)
+
+# Function to truncate context to fit within the model's context window
+def truncate_context(context, max_tokens):
+    words = context.split()
+    truncated_context = ' '.join(words[-max_tokens:])
+    return truncated_context
 
 def respond(
     message,
@@ -80,14 +86,24 @@ def respond(
     # Load the model with the maximum context length and control the maximum tokens in the response
     llm = Llama(
         model_path=model_path,
-        n_ctx=5000,  # Set the maximum context length
+        n_ctx=500,  # Set the maximum context length
         max_tokens=512  # Control the maximum number of tokens generated in the response
     )
 
     agent = Agent(llm, db_path, system_prompt)
     user_id = str(uuid.uuid4())  # Generate a unique user ID for each session
 
-    response = agent.process_query(user_id, message)
+    try:
+        # Truncate the context to fit within the model's context window
+        max_context_tokens = 500  # Adjust this based on your model's context window
+        context = f"{system_prompt}\nUser: {message}\nAssistant: "
+        truncated_context = truncate_context(context, max_context_tokens)
+
+        response = agent.process_query(user_id, message, truncated_context)
+    except ValueError as e:
+        logging.error(f"Error during processing: {e}")
+        response = "Désolé, il y a eu une erreur lors du traitement de votre requête. Veuillez essayer à nouveau."
+
     return response
 
     """
main.py ADDED
@@ -0,0 +1,78 @@
+# src/main.py
+from src.agent import Agent
+from src.create_database import load_and_process_dataset  # Import from create_database.py
+import os
+import uuid
+import requests
+import logging
+from llama_cpp import Llama
+
+# Configure logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+# Create the directory if it doesn't exist
+local_dir = "models"
+os.makedirs(local_dir, exist_ok=True)
+
+# Specify the filename for the model
+filename = "unsloth.Q4_K_M.gguf"
+model_path = os.path.join(local_dir, filename)
+
+# Function to download the model file
+def download_model(repo_id, filename, save_path):
+    # Construct the URL for the model file
+    url = f"https://huggingface.co/{repo_id}/resolve/main/{filename}"
+
+    # Download the model file
+    response = requests.get(url)
+    if response.status_code == 200:
+        with open(save_path, 'wb') as f:
+            f.write(response.content)
+        print(f"Model downloaded to {save_path}")
+    else:
+        print(f"Failed to download model: {response.status_code}")
+
+# Download the model if it doesn't exist
+if not os.path.exists(model_path):
+    download_model("adeptusnull/llama3.2-1b-wizardml-vicuna-uncensored-finetune-test", filename, model_path)
+
+def main():
+    model_path = "models/unsloth.Q4_K_M.gguf"  # Path to the downloaded model
+    db_path = "agent.db"
+    system_prompt = "Vous êtes l'assistant intelligent de Les Chronique MTC. Votre rôle est d'aider les visiteurs en expliquant le contenu des Chroniques, Flash Infos et Chronique-FAQ de Michel Thomas. Utilisez le contexte fourni pour améliorer vos réponses et veillez à ce qu'elles soient précises et pertinentes."
+    max_tokens = 512
+    temperature = 0.7
+    top_p = 0.95
+
+    # Check if the database exists, if not, initialize it
+    if not os.path.exists(db_path):
+        data_update_path = "data-update.txt"
+        keyword_dir = "keyword"  # Updated keyword directory
+        load_and_process_dataset(data_update_path, keyword_dir, db_path)
+
+    # Load the model
+    llm = Llama(
+        model_path=model_path,
+        n_ctx=5072,  # Set the maximum context length
+        max_tokens=max_tokens  # Control the maximum number of tokens generated in the response
+    )
+
+    agent = Agent(llm, db_path, system_prompt, max_tokens, temperature, top_p)
+
+    while True:
+        user_id = str(uuid.uuid4())  # Generate a unique user ID for each session
+        user_query = input("Entrez votre requête: ")
+        if user_query.lower() == 'exit':
+            break
+
+        try:
+            response = agent.process_query(user_id, user_query)
+            print("Réponse:", response)
+        except Exception as e:
+            print(f"Erreur lors du traitement de la requête: {e}")
+
+    # Clean up expired interactions
+    agent.memory.cleanup_expired_interactions()
+
+if __name__ == "__main__":
+    main()
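Two observations on the new file. First, download_model() reads the whole response into memory via response.content, which is risky for multi-gigabyte GGUF files, and it only prints on HTTP errors rather than failing. A streamed variant is sketched below under the same URL scheme; the download_model_streamed name and the 1 MiB chunk size are assumptions, not part of the commit.

# Sketch: streamed download so the GGUF file never sits fully in memory.
import requests

def download_model_streamed(repo_id, filename, save_path):
    url = f"https://huggingface.co/{repo_id}/resolve/main/{filename}"
    with requests.get(url, stream=True, timeout=60) as response:
        response.raise_for_status()  # fail loudly instead of printing and moving on
        with open(save_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=1 << 20):  # 1 MiB chunks
                f.write(chunk)
    print(f"Model downloaded to {save_path}")

huggingface_hub's hf_hub_download() covers the same ground with caching and retries, if adding that dependency is acceptable. Second, main() regenerates user_id = str(uuid.uuid4()) inside the while loop, so each query gets a fresh id even though the comment says one per session; hoisting that line above the loop would let the agent's memory actually span a session.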