userlocallm committed on
Commit 157245b · verified · 1 parent: fd5cdea

Upload 2 files

Files changed (1): src/agent.py +3 -3
src/agent.py CHANGED

@@ -23,7 +23,7 @@ class Agent:
         self.temperature = temperature # Default value
         self.top_p = top_p # Default value
 
-    def process_query(self, user_id: str, query: str, context: str = "") -> str:
+    def process_query(self, user_id: str, query: str) -> str:
         # Normalize the query to lowercase
         query = query.lower()
 
@@ -53,7 +53,7 @@ class Agent:
             return response
 
         # For general queries, use the existing multi-layer processing
-        initial_response = extract_and_summarize(query, self.memory, self.llm, self.dynamic_query_response_prompt(query), max_tokens=self.max_tokens, temperature=self.temperature, top_p=self.top_p, context=context)
+        initial_response = extract_and_summarize(query, self.memory, self.llm, self.dynamic_query_response_prompt(query), max_tokens=self.max_tokens, temperature=self.temperature, top_p=self.top_p)
 
         # Evaluate the initial response
         if not self.evaluate_response(initial_response, query):
@@ -62,7 +62,7 @@
             # Combine initial and additional data
             combined_context = f"{initial_response}\n{additional_data}"
             # Truncate the combined context to fit within the model's context window
-            max_context_length = 500 # Adjust this based on your LLM's token limit
+            max_context_length = 30000 # Adjust this based on your LLM's token limit
             if len(combined_context) > max_context_length:
                 combined_context = combined_context[:max_context_length]
             logging.info(f"Truncated combined context to {max_context_length} characters.")