Spaces:
Sleeping
Upload 2 files
Browse files
- app.py +44 -43
- templates/index.html +1 -1
app.py
CHANGED
@@ -170,40 +170,7 @@ def process_chat_with_files(chat_history, system_instruction, api_key):
     Returns:
         Response: Server-sent events response with chat messages
     """
-    try:
-        # Configure Gemini
-
-        # Initialize model
-        generation_config = {
-            "temperature": 1,
-            "top_p": 0.95,
-            "top_k": 40,
-            "max_output_tokens": 8192,
-            "response_mime_type": "text/plain",
-        }
-
-        model = genai.GenerativeModel(
-            model_name="gemini-1.5-pro-002",
-            generation_config=generation_config,
-            system_instruction=system_instruction,
-        )
-
-        # Process files if any
-
-        # Create chat session
-        chat_session = model.start_chat()
-
-        def generate():
-            response = chat_session.send_message(chat_history, stream=True)
-            for chunk in response:
-                if chunk.text:
-                    yield f"data: {chunk.text}\n\n"
-
-        return Response(generate(), mimetype='text/event-stream')
 
-    except Exception as e:
-        logger.error(f"Error in process_chat_with_files: {str(e)}")
-        return jsonify({'error': str(e)}), 500
 
 @app.route('/presets', methods=['GET'])
 def get_presets():
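Note: this hunk removes the body of process_chat_with_files; its Gemini setup and streaming logic are re-inlined into chat() in the hunks below. For reference, a minimal standalone sketch of the Flask server-sent-events pattern the app relies on (a hypothetical route, not the app's actual code):

    # Minimal SSE sketch: Flask streams each yielded "data: ...\n\n"
    # frame to the client as the generator produces it.
    from flask import Flask, Response

    app = Flask(__name__)

    @app.route('/stream')
    def stream():
        def generate():
            for word in ("hello", "world"):
                yield f"data: {word}\n\n"
        return Response(generate(), mimetype='text/event-stream')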
@@ -290,11 +257,10 @@ def chat():
     if not data or 'message' not in data or 'preset' not in data:
         return jsonify({'status': 'error', 'error': 'Invalid request data'}), 400
 
-
+    userMessage = data['userMessage']
     preset_id = data['preset']
     session_id = get_or_create_session_id()
     chat_history = load_chat_history(session_id)
-    files = data.get('files', [])
     system_instruction = ""
     preset_name = None
 
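Note: the guard above still checks for 'message', but the handler now reads data['userMessage'] (and the updated client sends userMessage), so well-formed requests would fail the 400 check. A hedged one-line correction, assuming the new key is the intended one:

    # Hypothetical fix: validate the key that is actually read below.
    if not data or 'userMessage' not in data or 'preset' not in data:
        return jsonify({'status': 'error', 'error': 'Invalid request data'}), 400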
@@ -316,18 +282,53 @@ def chat():
         default_preset = next(p for p in PREDEFINED_PRESETS if p['id'] == 'default')
         system_message = default_preset['content']
         preset_name = default_preset['name']
-
-
-
-
-
+    def generate():
+        key = key_manager.get_available_key()
+        if not key:
+            yield "No available API keys."
+            return
+
+        debug.log_prompt(chat_history, preset_name)
 
-
+        messages = chat_history + userMessage
 
-
+        generation_config = {
+            "temperature": 1,
+            "top_p": 0.95,
+            "top_k": 40,
+            "max_output_tokens": 8192,
+            "response_mime_type": "text/plain",
+        }
+
+        model = genai.GenerativeModel(
+            model_name="gemini-1.5-pro-002",
+            generation_config=generation_config,
+            system_instruction=system_instruction,
+        )
 
-
+        try:
+
+            model_message = {"role": "model", "parts": [""]}
+            # Create chat session
+            chat_session = model.start_chat()
+
+
+            response = chat_session.send_message(messages, stream=True)
+            for chunk in response:
+                if chunk.text:
+                    model_message['parts'][0] += chunk.text
+                    yield f"data: {chunk.text}\n\n"
+
+            chat_history.append(model_message)
+
+            session_id = get_or_create_session_id()
+            save_chat_history(session_id, chat_history)
+
+        except Exception as e:
+            logger.error(f"Error in process_chat_with_files: {str(e)}")
+            return jsonify({'error': str(e)}), 500
 
+    return Response(generate(), mimetype='text/event-stream')
 
 @app.route('/history', methods=['GET'])
 def get_history():
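Two caveats about the new generate() closure. First, messages = chat_history + userMessage only works if userMessage is itself a list of message dicts; a bare string would raise TypeError when concatenated with the history list. Second, the except branch returns jsonify(...) from inside a generator: once streaming has begun the HTTP status can no longer change, so the error payload never reaches the client as a 500 (and the log message still names process_chat_with_files even though the code now lives in chat()). A minimal sketch of reporting errors in-band instead (assumed helper name, not the committed code):

    # Hedged alternative: emit the error as an SSE frame, since the
    # response status is fixed once the first chunk is streamed.
    def generate_with_error_frames(chat_session, messages, logger):
        try:
            response = chat_session.send_message(messages, stream=True)
            for chunk in response:
                if chunk.text:
                    yield f"data: {chunk.text}\n\n"
        except Exception as e:
            logger.error(f"Error while streaming: {str(e)}")
            yield f"data: [ERROR] {str(e)}\n\n"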
templates/index.html
CHANGED
@@ -288,7 +288,7 @@
         const response = await fetch('/chat', {
             method: 'POST',
             headers: { 'Content-Type': 'application/json' },
-            body: JSON.stringify({
+            body: JSON.stringify({ userMessage: Message, preset: selectedPreset })
         });
 
         const reader = response.body.getReader();
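Note: Message in the new request body must be defined in the enclosing scope (the capitalization suggests it may be a typo for a local message variable). On the reading side, a hedged sketch of draining the stream returned by getReader(), assuming the fetch above succeeded:

    // Each chunk carries one or more "data: ...\n\n" SSE frames emitted
    // by the Flask generator; decode with { stream: true } so multi-byte
    // characters split across chunks are handled correctly.
    async function readStream(reader) {
        const decoder = new TextDecoder();
        while (true) {
            const { done, value } = await reader.read();
            if (done) break;
            console.log(decoder.decode(value, { stream: true }));
        }
    }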