Upload agent.py
agent.py
CHANGED
@@ -122,7 +122,7 @@ class Agent:
         full_context = " ".join([memory['description'] for memory, _, _ in all_memories])

         # Truncate the context if it exceeds the token limit
-        max_context_length =
+        max_context_length = 500 # Adjust this based on your LLM's token limit
         if len(full_context) > max_context_length:
             full_context = full_context[:max_context_length]
             logging.info(f"Truncated full context to {max_context_length} characters.")
@@ -147,7 +147,7 @@ class Agent:
         combined_context = f"Initial Response: {initial_response}\nHigh-Level Summary: {high_level_summary}"

         # Truncate the combined context to fit within the model's context window
-        max_context_length =
+        max_context_length = 500 # Adjust this based on your LLM's token limit
         if len(combined_context) > max_context_length:
             combined_context = combined_context[:max_context_length]
             logging.info(f"Truncated combined context to {max_context_length} characters.")
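
Note that the new limit truncates by characters, while the inline comment refers to the model's token limit. If token-accurate truncation is wanted, a minimal sketch along the following lines could replace the character slice; it assumes the tiktoken package is available and uses a hypothetical helper name truncate_to_tokens with the "cl100k_base" encoding, none of which appears in this commit.

import logging

import tiktoken


def truncate_to_tokens(text: str, max_tokens: int = 500,
                       encoding_name: str = "cl100k_base") -> str:
    # Encode the text, keep at most max_tokens tokens, and decode back.
    enc = tiktoken.get_encoding(encoding_name)
    tokens = enc.encode(text)
    if len(tokens) <= max_tokens:
        return text
    truncated = enc.decode(tokens[:max_tokens])
    logging.info(f"Truncated context to {max_tokens} tokens.")
    return truncated


# Example usage in place of the character slice:
# full_context = truncate_to_tokens(full_context, max_tokens=500)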