Upload 2 files
app.py
CHANGED
```diff
@@ -230,13 +230,13 @@ GEMINI_MODELS = [
     {"id": "gemini-exp-1121"},
     {"id": "gemini-exp-1206"},
     {"id": "gemini-2.0-flash-exp"},
-    {"id": "gemini-2.0-flash-thinking-exp-…
+    {"id": "gemini-2.0-flash-thinking-exp-0121"},
     {"id": "gemini-2.0-pro-exp"}
 ]
 
 @app.route('/')
 def index():
-    main_content = "Moonfanz Reminiproxy v2.3.…
+    main_content = "Moonfanz Reminiproxy v2.3.5 2025-01-14"
     html_template = """
 <!DOCTYPE html>
 <html>
@@ -397,6 +397,7 @@ def chat_completions():
     max_tokens = request_data.get('max_tokens', 8192)
     show_thoughts = request_data.get('show_thoughts', False)
     stream = request_data.get('stream', False)
+    use_system_prompt = request_data.get('use_system_prompt', False)
     hint = "流式" if stream else "非流"
     logger.info(f"\n{model} [{hint}] → {current_api_key[:8]}...{current_api_key[-3:]}")
     is_thinking = 'thinking' in model
@@ -404,7 +405,7 @@
     response_type = 'streamGenerateContent' if stream else 'generateContent'
     is_SSE = '&alt=sse' if stream else ''
 
-    contents, system_instruction, error_response = func.process_messages_for_gemini(messages)
+    contents, system_instruction, error_response = func.process_messages_for_gemini(messages, use_system_prompt)
 
     if error_response:
         logger.error(f"处理输入消息时出错↙\n {error_response}")
@@ -697,7 +698,7 @@ if __name__ == '__main__':
 
     scheduler.add_job(keep_alive, 'interval', hours=12)
     scheduler.start()
-    logger.info(f"Reminiproxy v2.3.…
+    logger.info(f"Reminiproxy v2.3.5 启动")
     logger.info(f"最大尝试次数/MaxRetries: {MAX_RETRIES}")
     logger.info(f"最大请求次数/MaxRequests: {MAX_REQUESTS}")
     logger.info(f"请求限额窗口/LimitWindow: {LIMIT_WINDOW} 秒")
```
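The headline change is the new `use_system_prompt` request field, read in `chat_completions()` and threaded through to `func.process_messages_for_gemini()`. A minimal client call might look like the sketch below; the route, host, and port are assumptions (the diff shows only the handler, not its URL rule), and the field defaults to `False` on the server.

```python
import requests

# Hypothetical client call -- the route, host, and port are assumptions;
# the diff shows the chat_completions() handler but not its URL rule.
resp = requests.post(
    "http://localhost:7860/v1/chat/completions",
    json={
        "model": "gemini-2.0-flash-thinking-exp-0121",
        "stream": False,
        "max_tokens": 8192,
        # New in this commit: opt in to system-prompt handling.
        "use_system_prompt": True,
        "messages": [
            {"role": "system", "content": "You are a concise assistant."},
            {"role": "user", "content": "Hello!"},
        ],
    },
)
print(resp.json())
```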
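The unchanged context lines also show how the upstream call is selected: streaming requests use `streamGenerateContent` plus an `&alt=sse` query fragment, non-streaming ones use `generateContent`. The sketch below shows how those fragments plausibly combine into the Gemini REST URL; the base URL follows Google's public generativelanguage API, but its exact use here is an assumption, since the diff does not include the request code.

```python
GEMINI_BASE = "https://generativelanguage.googleapis.com/v1beta/models"

def build_gemini_url(model: str, api_key: str, stream: bool) -> str:
    # Mirrors the diff's context lines: streaming switches both the
    # RPC name and the SSE query flag.
    response_type = 'streamGenerateContent' if stream else 'generateContent'
    is_SSE = '&alt=sse' if stream else ''
    return f"{GEMINI_BASE}/{model}:{response_type}?key={api_key}{is_SSE}"

# e.g. .../gemini-2.0-pro-exp:streamGenerateContent?key=...&alt=sse
```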
func.py
CHANGED
```diff
@@ -27,11 +27,11 @@ def authenticate_request(request):
 
     return True, None, None
 
-def process_messages_for_gemini(messages):
+def process_messages_for_gemini(messages, use_system_prompt=False):
     gemini_history = []
     errors = []
     system_instruction_text = ""
-    is_system_phase = …
+    is_system_phase = use_system_prompt
     for i, message in enumerate(messages):
         role = message.get('role')
         content = message.get('content')
```
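Only the signature and the `is_system_phase` seed are visible in this hunk, but they suggest the control flow: while the flag is set, leading `system` messages accumulate into `system_instruction_text`, and the first non-system message ends the phase. A minimal sketch under that assumption follows; the loop body and return shape are reconstructions, not the actual code.

```python
def process_messages_for_gemini(messages, use_system_prompt=False):
    """Plausible shape of the function; only its first lines appear in the diff."""
    gemini_history = []
    errors = []
    system_instruction_text = ""
    is_system_phase = use_system_prompt
    for i, message in enumerate(messages):
        role = message.get('role')
        content = message.get('content')
        if is_system_phase and role == 'system':
            # Fold leading system messages into the system instruction.
            if system_instruction_text:
                system_instruction_text += "\n"
            system_instruction_text += content
        else:
            is_system_phase = False  # first non-system message ends the phase
            if role == 'assistant':
                role = 'model'  # Gemini's chat history uses 'user' / 'model'
            gemini_history.append({"role": role, "parts": [{"text": content}]})
    system_instruction = (
        {"parts": [{"text": system_instruction_text}]} if system_instruction_text else None
    )
    return gemini_history, system_instruction, (errors or None)
```

With `use_system_prompt=False` (the default), `is_system_phase` starts off, so system messages stay in the ordinary history and existing callers keep their old behavior.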