Upload 4 files
- app.py +219 -144
- func.py +42 -2
- requirements.txt +1 -2
app.py
CHANGED
@@ -1,83 +1,104 @@
 from flask import Flask, request, jsonify, Response, stream_with_context, render_template_string
 import google.generativeai as genai
 import json
-from datetime import datetime
 import os
 import logging
 import func
 from apscheduler.schedulers.background import BackgroundScheduler
-import requests
 import time

 os.environ['TZ'] = 'Asia/Shanghai'
 app = Flask(__name__)

 app.secret_key = os.urandom(24)

-formatter = logging.Formatter('%(message)s')

 logger = logging.getLogger(__name__)
-logger.setLevel(logging.INFO)
-
 handler = logging.StreamHandler()
 handler.setFormatter(formatter)

-…
-    },
-    {
-        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
-        "threshold": "BLOCK_NONE"
-    },
-    {
-        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
-        "threshold": "BLOCK_NONE"
-    },
-]

 class APIKeyManager:
     def __init__(self):
         self.api_keys = os.environ.get('KeyArray').split(',')
-        self.current_index = 0
-
         for i, api_key in enumerate(self.api_keys):
             logger.info(f"API Key{i}: {api_key[:11]}...")

-    def …
-…
-        return current_key

 key_manager = APIKeyManager()
 current_api_key = key_manager.get_available_key()
-

 GEMINI_MODELS = [
-    {"id": "gemini-pro"},
-    {"id": "gemini-pro-vision"},
-    {"id": "gemini-1.0-pro"},
-    {"id": "gemini-1.0-pro-vision"},
     {"id": "gemini-1.5-pro-002"},
     {"id": "gemini-exp-1114"},
     {"id": "gemini-exp-1121"},
     {"id": "gemini-exp-1206"},
     {"id": "gemini-2.0-flash-exp"},
     {"id": "gemini-2.0-flash-thinking-exp-1219"},
-    {"id": "gemini-2.0-pro-exp"}
 ]
 @app.route('/')
 def index():
-    main_content = "Moonfanz …
     html_template = """
 <!DOCTYPE html>
 <html>
@@ -109,146 +130,195 @@ function copyLink(event) {
 </script>
 </head>
 <body>
-    {{ main_content }}<br/><br/>完全开源、免费且禁止商用<br/><br/>点击复制反向代理: <a href="v1" onclick="copyLink(event)">Copy Link</a><br/>聊天来源选择"自定义(兼容 OpenAI)"<br/>将复制的网址填入到自定义端点<br…
 </body>
 </html>
 """
     return render_template_string(html_template, main_content=main_content)

 @app.route('/hf/v1/chat/completions', methods=['POST'])
 def chat_completions():
-    global current_api_key
     is_authenticated, auth_error, status_code = func.authenticate_request(request)
     if not is_authenticated:
         return auth_error if auth_error else jsonify({'error': '未授权'}), status_code if status_code else 401
-    try:
-        request_data = request.get_json()
-        messages = request_data.get('messages', [])
-        model = request_data.get('model', 'gemini-exp-1206')
-        temperature = request_data.get('temperature', 1)
-        max_tokens = request_data.get('max_tokens', 8192)
-        stream = request_data.get('stream', False)

-…
-        print(error_response)
-…
-        "…
-…
-            model_name=model,
-            generation_config=generation_config,
-            safety_settings=safety_settings
-        )
-…
         if gemini_history:
             chat_session = gen_model.start_chat(history=gemini_history)
-            response = chat_session.send_message(user_message, stream=…
         else:
-            response = gen_model.generate_content(user_message, stream=…
-…
-                        'delta': {
-                            'content': chunk.text
-                        },
-                        'finish_reason': None,
-                        'index': 0
-                    }
-                ],
-                'object': 'chat.completion.chunk'
-            }
-
-            yield f"data: {json.dumps(data)}\n\n"
             data = {
                 'choices': [
                     {
-                        'delta': {
-…
                         'index': 0
                     }
                 ],
                 'object': 'chat.completion.chunk'
             }
-            logger.info(f"200!")
-            yield f"data: {json.dumps(data)}\n\n"
-        except Exception as e:
-
-            logger.error(f"Error during streaming: {str(e)}")
-            current_api_key = key_manager.get_available_key()
-            logger.info(f"API KEY Switched -> {current_api_key[:11]}...")
-            data = {
-                'error': {
-                    'message': str(e),
-                    'type': 'internal_server_error'
-                }
-            }
             yield f"data: {json.dumps(data)}\n\n"

-…
-            text_content = response.candidates[0].content.parts[0].text
-
-        except (AttributeError, IndexError, TypeError) as e:
-            logger.error(f"Error getting text content: {str(e)}")
-
-            text_content = "Error: Unable to get text content."
-
-        response_data = {
-            'id': 'chatcmpl-xxxxxxxxxxxx',
-            'object': 'chat.completion',
-            'created': int(datetime.now().timestamp()),
-            'model': model,
-            'choices': [{
-                'index': 0,
-                'message': {
-                    'role': 'assistant',
-                    'content': text_content
-                },
-                'finish_reason': 'stop'
-            }],
-            'usage':{
-                'prompt_tokens': 0,
-                'completion_tokens': 0,
-                'total_tokens': 0
             }
         }
-…
-        logger.error(f"…
-
-        logger.info(f"API KEY Switched -> {current_api_key[:11]}...")
-        return jsonify({
             'error': {
-                'message': …
-                'type': '…
             }
-        }

 @app.route('/hf/v1/models', methods=['GET'])
 def list_models():
@@ -265,7 +335,12 @@ def keep_alive():
 if __name__ == '__main__':
     scheduler = BackgroundScheduler()
-    scheduler.add_job(keep_alive, 'interval', hours = 12)

     scheduler.start()
     app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 7860)))
Updated app.py (added lines are marked with +; unchanged regions between hunks are shown as ...):

 from flask import Flask, request, jsonify, Response, stream_with_context, render_template_string
+from google.generativeai.types import generation_types
+from google.api_core.exceptions import InvalidArgument, ResourceExhausted, ServiceUnavailable, InternalServerError, Aborted
 import google.generativeai as genai
 import json
 import os
 import logging
 import func
+from datetime import datetime, timedelta
 from apscheduler.schedulers.background import BackgroundScheduler
 import time
+import requests
+from collections import deque

 os.environ['TZ'] = 'Asia/Shanghai'

 app = Flask(__name__)

 app.secret_key = os.urandom(24)

+formatter = logging.Formatter('%(message)s')
 logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
 handler = logging.StreamHandler()
 handler.setFormatter(formatter)
+logger.addHandler(handler)

+MAX_RETRIES = int(os.environ.get('MaxRetries', 3))
+MAX_REQUESTS = int(os.environ.get('MaxRequests', 4))
+LIMIT_WINDOW = int(os.environ.get('LimitWindow', 60))
+RETRY_DELAY = 1
+MAX_RETRY_DELAY = 16
+
+request_counts = {}
+
+api_key_blacklist = set()
+api_key_blacklist_duration = 60

 class APIKeyManager:
     def __init__(self):
         self.api_keys = os.environ.get('KeyArray').split(',')
+        self.current_index = 0
+
+    def get_available_key(self):
+        num_keys = len(self.api_keys)
+        for _ in range(num_keys):
+            if self.current_index >= num_keys:
+                self.current_index = 0
+            current_key = self.api_keys[self.current_index]
+            self.current_index += 1
+
+            if current_key not in api_key_blacklist:
+                return current_key
+
+        logger.error("所有API key都已耗尽或被黑名单,请重新配置或稍后重试")
+        return None
+
+    def show_all_keys(self):
+        logger.info(f"当前可用API key个数: {len(self.api_keys)} ")
         for i, api_key in enumerate(self.api_keys):
             logger.info(f"API Key{i}: {api_key[:11]}...")

+    def blacklist_key(self, key):
+        logger.warning(f"{key[:11]} → 加入黑名单 {api_key_blacklist_duration} 秒")
+        api_key_blacklist.add(key)
+
+        scheduler.add_job(lambda: api_key_blacklist.discard(key), 'date', run_date=datetime.now() + timedelta(seconds=api_key_blacklist_duration))

 key_manager = APIKeyManager()
+key_manager.show_all_keys()
 current_api_key = key_manager.get_available_key()
+
+def switch_api_key():
+    global current_api_key
+    key = key_manager.get_available_key()
+    if key:
+        current_api_key = key
+        logger.info(f"API key 替换为 → {current_api_key[:11]}...")
+    else:
+        logger.error("API key 替换失败,所有API key都已耗尽或被黑名单,请重新配置或稍后重试")
+
+logger.info(f"当前 API key: {current_api_key[:11]}...")

 GEMINI_MODELS = [
     {"id": "gemini-1.5-pro-002"},
+    {"id": "gemini-1.5-pro-latest"},
+    {"id": "gemini-1.5-pro-exp-0827"},
+    {"id": "learnlm-1.5-pro-experimental"},
     {"id": "gemini-exp-1114"},
     {"id": "gemini-exp-1121"},
     {"id": "gemini-exp-1206"},
     {"id": "gemini-2.0-flash-exp"},
     {"id": "gemini-2.0-flash-thinking-exp-1219"},
+    {"id": "gemini-2.0-pro-exp"}
 ]
+
 @app.route('/')
 def index():
+    main_content = "Moonfanz Reminiproxy"
     html_template = """
 <!DOCTYPE html>
 <html>
 ...
 </script>
 </head>
 <body>
+    {{ main_content }}<br/><br/>完全开源、免费且禁止商用<br/><br/>点击复制反向代理: <a href="v1" onclick="copyLink(event)">Copy Link</a><br/>聊天来源选择"自定义(兼容 OpenAI)"<br/>将复制的网址填入到自定义端点<br/>将设置password填入自定义API秘钥<br/><br/><br/>
 </body>
 </html>
 """
     return render_template_string(html_template, main_content=main_content)

+def is_within_rate_limit(api_key):
+    now = datetime.now()
+    if api_key not in request_counts:
+        request_counts[api_key] = deque()
+
+    while request_counts[api_key] and request_counts[api_key][0] < now - timedelta(seconds=LIMIT_WINDOW):
+        request_counts[api_key].popleft()
+
+    return len(request_counts[api_key]) < MAX_REQUESTS
+
+def increment_request_count(api_key):
+    now = datetime.now()
+    if api_key not in request_counts:
+        request_counts[api_key] = deque()
+    request_counts[api_key].append(now)
+
+def handle_api_error(error, attempt, stream=False):
+    if attempt > MAX_RETRIES:
+        logger.error(f"{MAX_RETRIES} 次尝试后仍然失败,请修改预设或输入")
+        return False, jsonify({
+            'error': {
+                'message': f"{MAX_RETRIES} 次尝试后仍然失败,请修改预设或输入",
+                'type': 'max_retries_exceeded'
+            }
+        })
+
+    if isinstance(error, InvalidArgument):
+        logger.error(f"{current_api_key[:11]} → 无效,可能被删除或过期")
+        key_manager.blacklist_key(current_api_key)
+        switch_api_key()
+        return False, None
+
+    elif isinstance(error, (ResourceExhausted, Aborted, InternalServerError, ServiceUnavailable)):
+        delay = min(RETRY_DELAY * (2 ** attempt), MAX_RETRY_DELAY)
+        if isinstance(error, ResourceExhausted):
+            logger.warning(f"{current_api_key[:11]} → 超过限额;{delay} 秒后重试...")
+        else:
+            logger.warning(f"{current_api_key[:11]} → 未知错误↙ \n{type(error).__name__}\n{delay} 秒后重试...")
+        time.sleep(delay)
+        if isinstance(error, (ResourceExhausted)):
+            key_manager.blacklist_key(current_api_key)
+            switch_api_key()
+        return False, None
+
+    elif isinstance(error, generation_types.StopCandidateException):
+        logger.warning(f"输出截断")
+        switch_api_key()
+        return False, None
+
+    else:
+        logger.error(f"未知错误↙\n {error}")
+        return False, None
+
 @app.route('/hf/v1/chat/completions', methods=['POST'])
 def chat_completions():
     is_authenticated, auth_error, status_code = func.authenticate_request(request)
     if not is_authenticated:
         return auth_error if auth_error else jsonify({'error': '未授权'}), status_code if status_code else 401

+    request_data = request.get_json()
+    messages = request_data.get('messages', [])
+    model = request_data.get('model', 'gemini-2.0-flash-exp')
+    temperature = request_data.get('temperature', 1)
+    max_tokens = request_data.get('max_tokens', 8192)
+    stream = request_data.get('stream', False)

+    logger.info(f"\n{model} [r] → {current_api_key[:11]}...")

+    gemini_history, user_message, error_response = func.process_messages_for_gemini(messages)

+    if error_response:
+        logger.error(f"Error in processing messages: {error_response}")
+        return jsonify(error_response), 400

+    def do_request(current_api_key, attempt):
+        if not is_within_rate_limit(current_api_key):
+            logger.warning(f"{current_api_key[:11]} → 暂时超过限额")
+            key_manager.blacklist_key(current_api_key)
+            switch_api_key()
+            return False, None

+        increment_request_count(current_api_key)

+        gen_model = func.get_gen_model(current_api_key, model, temperature, max_tokens)

+        try:
             if gemini_history:
                 chat_session = gen_model.start_chat(history=gemini_history)
+                response = chat_session.send_message(user_message, stream=stream)
             else:
+                response = gen_model.generate_content(user_message, stream=stream)
+            return True, response
+        except Exception as e:
+            return handle_api_error(e, attempt, stream)
+
+    def generate(response):
+        try:
+            for chunk in response:
+                if chunk.text:
                     data = {
                         'choices': [
                             {
+                                'delta': {
+                                    'content': chunk.text
+                                },
+                                'finish_reason': None,
                                 'index': 0
                             }
                         ],
                         'object': 'chat.completion.chunk'
                     }
                     yield f"data: {json.dumps(data)}\n\n"

+
+            yield "data: [DONE]\n\n"
+            logger.info(f"200!")

+        except Exception as e:
+            switch_api_key()
+            logger.error(f"流式输出时截断")
+            error_data = {
+                'error': {
+                    'message': '流式输出时截断,请关闭流式输出或修改你的输入',
+                    'type': 'internal_server_error'
                 }
             }
+            yield f"data: {json.dumps(error_data)}\n\n"
+            yield "data: [DONE]\n\n"
+
+    attempt = 0
+    success = False
+    response = None
+
+    while attempt < MAX_RETRIES and not success:
+        attempt += 1
+        logger.info(f"尝试中... 第 {attempt}/{MAX_RETRIES} 次尝试")
+        success, response = do_request(current_api_key, attempt)

+    if not success:
+        logger.error(f" {MAX_RETRIES} 次尝试均失败,请修改预设")
+        response = {
             'error': {
+                'message': f' {MAX_RETRIES} 次尝试均失败,请修改预设',
+                'type': 'internal_server_error'
             }
+        }
+        return jsonify(response), 500 if response is not None else 503
+
+    if stream:
+        return Response(stream_with_context(generate(response)), mimetype='text/event-stream')
+    else:
+        try:
+            text_content = response.text
+        except (AttributeError, IndexError, TypeError) as e:
+            logger.error(f"处理响应时出错↙\n{e}")
+            return jsonify({
+                'error': {
+                    'message': '处理响应时出错',
+                    'type': 'response_processing_error'
+                }
+            }), 500
+
+        response_data = {
+            'id': 'chatcmpl-xxxxxxxxxxxx',
+            'object': 'chat.completion',
+            'created': int(datetime.now().timestamp()),
+            'model': model,
+            'choices': [{
+                'index': 0,
+                'message': {
+                    'role': 'assistant',
+                    'content': text_content
+                },
+                'finish_reason': 'stop'
+            }],
+            'usage': {
+                'prompt_tokens': 0,
+                'completion_tokens': 0,
+                'total_tokens': 0
+            }
+        }
+        logger.info(f"200!")
+        return jsonify(response_data)

 @app.route('/hf/v1/models', methods=['GET'])
 def list_models():
 ...

 if __name__ == '__main__':
     scheduler = BackgroundScheduler()
+    scheduler.add_job(keep_alive, 'interval', hours=12)
     scheduler.start()
+
+    logger.info(f"最大尝试次数/Max retries: {MAX_RETRIES}")
+    logger.info(f"最大请求次数/Max requests: {MAX_REQUESTS}")
+    logger.info(f"请求限额窗口/Limit window: {LIMIT_WINDOW} seconds")
+
     app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 7860)))
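The endpoint above is OpenAI-compatible: per the index page, users select an OpenAI-compatible custom chat source, paste the copied /hf/v1 reverse-proxy URL as the endpoint, and use the configured password as the API key. A minimal client sketch under those assumptions (the base URL and password below are placeholders, and the Bearer authorization scheme is assumed from the usual OpenAI header format; the exact check lives in func.authenticate_request):

import requests

BASE_URL = "https://your-space.hf.space"  # placeholder: the deployed Space URL copied from the index page
PASSWORD = "your-password"                # placeholder: the value of the `password` environment variable

payload = {
    "model": "gemini-2.0-flash-exp",      # default model in the new handler
    "messages": [{"role": "user", "content": "Hello"}],
    "temperature": 1,
    "max_tokens": 8192,
    "stream": False,
}

resp = requests.post(
    f"{BASE_URL}/hf/v1/chat/completions",
    headers={"Authorization": f"Bearer {PASSWORD}"},
    json=payload,
    timeout=60,
)
# Non-streaming responses use the OpenAI chat.completion shape built in response_data above.
print(resp.json()["choices"][0]["message"]["content"])

With "stream": True the route instead returns a text/event-stream of chat.completion.chunk objects terminated by data: [DONE], matching the generate() generator above.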
func.py
CHANGED
@@ -6,8 +6,33 @@ import logging
 import json
 import re
 import os
 logger = logging.getLogger(__name__)

 password = os.environ['password']

 def authenticate_request(request):

@@ -15,7 +40,7 @@ def authenticate_request(request):

     if not auth_header:
         return False, jsonify({'error': '缺少Authorization请求头'}), 401
-
     try:
         auth_type, pass_word = auth_header.split(' ', 1)
     except ValueError:

@@ -29,8 +54,23 @@ def authenticate_request(request):

     return True, None, None

 def process_messages_for_gemini(messages):
-
     gemini_history = []
     for message in messages:
         role = message.get('role')

Updated func.py (added lines are marked with +; unchanged regions are shown as ...):

 import json
 import re
 import os
+import requests
+import google.generativeai as genai
 logger = logging.getLogger(__name__)

+
+request_counts = {}
+
+# 核心优势
+safety_settings = [
+    {
+        "category": "HARM_CATEGORY_HARASSMENT",
+        "threshold": "BLOCK_NONE"
+    },
+    {
+        "category": "HARM_CATEGORY_HATE_SPEECH",
+        "threshold": "BLOCK_NONE"
+    },
+    {
+        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "threshold": "BLOCK_NONE"
+    },
+    {
+        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+        "threshold": "BLOCK_NONE"
+    },
+]
+
 password = os.environ['password']

 def authenticate_request(request):
 ...
     if not auth_header:
         return False, jsonify({'error': '缺少Authorization请求头'}), 401
+
     try:
         auth_type, pass_word = auth_header.split(' ', 1)
     except ValueError:
 ...
     return True, None, None

+def get_gen_model(api_key, model, temperature, max_tokens):
+    genai.configure(api_key=api_key)
+
+    generation_config = {
+        "temperature": temperature,
+        "max_output_tokens": max_tokens
+    }
+
+    gen_model = genai.GenerativeModel(
+        model_name=model,
+        generation_config=generation_config,
+        safety_settings=safety_settings
+    )
+    return gen_model
+
 def process_messages_for_gemini(messages):
+
     gemini_history = []
     for message in messages:
         role = message.get('role')
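A rough sketch of how the new get_gen_model helper is meant to be used, mirroring the call in app.py's do_request (the key and prompt below are placeholders, not values from this commit; note that importing func requires the password environment variable to be set):

import func

api_key = "AIzaSy-placeholder"  # placeholder; the app takes real keys from the KeyArray environment variable

gen_model = func.get_gen_model(api_key, "gemini-2.0-flash-exp", temperature=1, max_tokens=8192)
response = gen_model.generate_content("Hello", stream=False)  # same call pattern as do_request()
print(response.text)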
requirements.txt
CHANGED
@@ -5,5 +5,4 @@ Werkzeug==2.0.3
 google==3.0.0
 google-generativeai==0.8.3
 pillow==10.4.0
-apscheduler
-Flask-HTTPAuth==4.8.0

Updated requirements.txt (added line marked with +):

 google==3.0.0
 google-generativeai==0.8.3
 pillow==10.4.0
+apscheduler
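For local testing, the app reads its configuration entirely from environment variables. A minimal sketch after pip install -r requirements.txt, with placeholder values (KeyArray and password are required; MaxRetries, MaxRequests, LimitWindow and PORT default to 3, 4, 60 and 7860):

# run_local.py (illustration only; all values below are placeholders)
import os

os.environ.setdefault("KeyArray", "AIzaSy-key-1,AIzaSy-key-2")  # comma-separated Gemini API keys
os.environ.setdefault("password", "your-password")              # shared secret checked by func.authenticate_request
os.environ.setdefault("MaxRetries", "3")
os.environ.setdefault("MaxRequests", "4")
os.environ.setdefault("LimitWindow", "60")
os.environ.setdefault("PORT", "7860")

# With the variables exported, `python app.py` starts the proxy together with the keep-alive scheduler;
# importing the module and calling run() directly skips the scheduler setup under __main__.
import app
app.app.run(host="0.0.0.0", port=int(os.environ["PORT"]))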