@app.route('/hf/v1/chat/completions', methods=['POST'])
def chat_completions():
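    r"""OpenAI-compatible chat completions endpoint backed by the Gemini API.

    Accepts an OpenAI-style JSON body (messages, model, temperature,
    max_tokens, stream) and returns either a single chat.completion object
    or a text/event-stream of chat.completion.chunk events.

    Example request (host/port and the auth header are deployment-dependent;
    authenticate_request defines what is actually accepted):

        curl http://localhost:5000/hf/v1/chat/completions \
          -H "Content-Type: application/json" \
          -H "Authorization: Bearer <key>" \
          -d '{"model": "gemini-exp-1206", "messages": [{"role": "user", "content": "Hello"}]}'
    """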
    global current_api_key
    is_authenticated, auth_error, status_code = authenticate_request(request)
    if not is_authenticated:
        return (auth_error if auth_error else jsonify({'error': 'Unauthorized'}),
                status_code if status_code else 401)
    try:
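        # Parse the OpenAI-style request body and pull out the supported fields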
        request_data = request.get_json()
        messages = request_data.get('messages', [])
        model = request_data.get('model', 'gemini-exp-1206')
        temperature = request_data.get('temperature', 1)
        max_tokens = request_data.get('max_tokens', 8192)
        stream = request_data.get('stream', False)

        logger.info(colored(f"\n{model} [r] -> {current_api_key[:11]}...", 'yellow'))
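
        # Gemini's chat history distinguishes only "user" and "model" roles;
        # system prompts are passed through as user turns below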
        # Convert OpenAI-format messages to Gemini format
        gemini_history = []
        for message in messages:
            role = message.get('role')
            content = message.get('content')
            if role == 'system':
                gemini_history.append({"role": "user", "parts": [content]})
            elif role == 'user':
                gemini_history.append({"role": "user", "parts": [content]})
            elif role == 'assistant':
                gemini_history.append({"role": "model", "parts": [content]})
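
        # Use the last turn as the prompt; everything before it becomes history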
        user_message = gemini_history[-1]['parts'][0] if gemini_history else messages[-1]['content']
        gemini_history = gemini_history[:-1] if gemini_history else []

        genai.configure(api_key=current_api_key)
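        # Note: genai.configure sets process-global SDK state, so concurrent
        # requests share whichever key was configured most recently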

        # Call the Gemini API
        generation_config = {
            "temperature": temperature,
            "max_output_tokens": max_tokens
        }
        gen_model = genai.GenerativeModel(
            model_name=model,
            generation_config=generation_config,
            safety_settings=safety_settings
        )

        if stream:
            # Streaming response
            if gemini_history:
                chat_session = gen_model.start_chat(history=gemini_history)
                response = chat_session.send_message(user_message, stream=True)
            else:
                response = gen_model.generate_content(user_message, stream=True)
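
            # Re-emit Gemini's streamed chunks as OpenAI-style SSE events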
            def generate():
                try:
                    for chunk in response:
                        if chunk.text:
                            data = {
                                'choices': [
                                    {
                                        'delta': {
                                            'content': chunk.text
                                        },
                                        'finish_reason': None,
                                        'index': 0
                                    }
                                ],
                                'object': 'chat.completion.chunk'
                            }
                            yield f"data: {json.dumps(data)}\n\n"

                    # Final chunk carrying finish_reason 'stop'
                    data = {
                        'choices': [
                            {
                                'delta': {},
                                'finish_reason': 'stop',
                                'index': 0
                            }
                        ],
                        'object': 'chat.completion.chunk'
                    }
                    yield f"data: {json.dumps(data)}\n\n"
                    # End-of-stream sentinel expected by OpenAI SSE clients
                    yield "data: [DONE]\n\n"
                except Exception as e:
                    logger.error(f"Error during streaming: {str(e)}")
                    data = {
                        'error': {
                            'message': str(e),
                            'type': 'internal_server_error'
                        }
                    }
                    yield f"data: {json.dumps(data)}\n\n"

            return Response(stream_with_context(generate()), mimetype='text/event-stream')
        else:
            # Non-streaming call
            if gemini_history:
                chat_session = gen_model.start_chat(history=gemini_history)
                response = chat_session.send_message(user_message)
            else:
                response = gen_model.generate_content(user_message)

            try:
                text_content = response.candidates[0].content.parts[0].text
            except (AttributeError, IndexError, TypeError) as e:
                logger.error(colored(f"Error getting text content: {str(e)}", 'red'))
                text_content = "Error: Unable to get text content."
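
            # Assemble an OpenAI-style chat.completion response; token usage
            # is not tracked, so the usage counts are placeholder zeros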
            response_data = {
                'id': 'chatcmpl-xxxxxxxxxxxx',
                'object': 'chat.completion',
                'created': int(datetime.now().timestamp()),
                'model': model,
                'choices': [{
                    'index': 0,
                    'message': {
                        'role': 'assistant',
                        'content': text_content
                    },
                    'finish_reason': 'stop'
                }],
                'usage': {
                    'prompt_tokens': 0,
                    'completion_tokens': 0,
                    'total_tokens': 0
                }
            }
            logger.info(colored("Generation Success", 'green'))
            return jsonify(response_data)

    except Exception as e:
        logger.error(f"Error in chat completions: {str(e)}")
        return jsonify({
            'error': {
                'message': str(e),
                'type': 'internal_server_error'
            }
        }), 500
    finally:
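        # Rotate to the next available API key after every request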
        current_api_key = key_manager.get_available_key()
        logger.info(colored(f"API KEY Switched -> {current_api_key[:11]}...", 'cyan'))