SkyCloud7 committed on
Commit
6b2c53b
·
verified ·
1 Parent(s): a81f3b5

Upload 3 files

Browse files
Files changed (3) hide show
  1. Dockerfile +10 -0
  2. app.py +459 -0
  3. func.py +108 -0
Dockerfile ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
# Lightweight Python base image for the Gemini proxy service.
FROM python:3.9-slim

WORKDIR /app

# Install dependencies first so Docker layer caching survives code-only changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

# app.py binds 0.0.0.0 and reads the PORT env var (default 7860).
CMD ["python", "app.py"]
app.py ADDED
@@ -0,0 +1,459 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from flask import Flask, request, jsonify, Response, stream_with_context, render_template_string
from google.generativeai.types import BlockedPromptException, StopCandidateException, generation_types
from google.api_core.exceptions import InvalidArgument, ResourceExhausted, Aborted, InternalServerError, ServiceUnavailable, PermissionDenied
import google.generativeai as genai
import json
import os
import re
import logging
import func
from datetime import datetime, timedelta
from apscheduler.schedulers.background import BackgroundScheduler
import time
import requests
from collections import deque
import random

# Pin the process timezone so datetime.now() timestamps are Shanghai time.
os.environ['TZ'] = 'Asia/Shanghai'

app = Flask(__name__)

# Fresh random secret per process (sessions do not survive restarts).
app.secret_key = os.urandom(24)

# Bare-message log format on stderr; module-level logger used throughout.
formatter = logging.Formatter('%(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)

# Retry / rate-limit tuning, overridable via environment variables.
MAX_RETRIES = int(os.environ.get('MaxRetries', 3))    # attempts per chat request
MAX_REQUESTS = int(os.environ.get('MaxRequests', 2))  # requests allowed per key per window
LIMIT_WINDOW = int(os.environ.get('LimitWindow', 60))  # rate-limit window, seconds
RETRY_DELAY = 1        # base backoff delay in seconds (doubled per attempt)
MAX_RETRY_DELAY = 16   # backoff cap in seconds

# api_key -> deque of recent request timestamps (see is_within_rate_limit).
request_counts = {}

# Keys temporarily removed from rotation; entries are discarded again by a
# scheduled job after api_key_blacklist_duration seconds.
api_key_blacklist = set()
api_key_blacklist_duration = 60

# Core feature: disable all of Gemini's safety filters for proxied requests.
safety_settings = [
    {
        "category": "HARM_CATEGORY_HARASSMENT",
        "threshold": "BLOCK_NONE"
    },
    {
        "category": "HARM_CATEGORY_HATE_SPEECH",
        "threshold": "BLOCK_NONE"
    },
    {
        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
        "threshold": "BLOCK_NONE"
    },
    {
        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
        "threshold": "BLOCK_NONE"
    },
]
60
+
61
class APIKeyManager:
    """Round-robin pool of Gemini API keys with temporary blacklisting."""

    def __init__(self):
        # Extract every well-formed key ("AIzaSy" + 33 key chars) from the
        # KeyArray env var. Default to '' so a missing variable yields an
        # empty pool instead of re.findall raising TypeError on None.
        self.api_keys = re.findall(r"AIzaSy[a-zA-Z0-9_-]{33}", os.environ.get('KeyArray', ''))
        # Start rotation at a random position; guard the empty pool, where
        # random.randint(0, -1) would raise ValueError.
        self.current_index = random.randint(0, len(self.api_keys) - 1) if self.api_keys else 0

    def get_available_key(self):
        """Return the next non-blacklisted key, or None if all are unusable."""
        num_keys = len(self.api_keys)
        for _ in range(num_keys):
            if self.current_index >= num_keys:
                self.current_index = 0
            current_key = self.api_keys[self.current_index]
            self.current_index += 1

            if current_key not in api_key_blacklist:
                return current_key

        logger.error("所有API key都已耗尽或被暂时禁用,请重新配置或稍后重试")
        return None

    def show_all_keys(self):
        """Log the pool size and each key's 11-char prefix (never full keys)."""
        logger.info(f"当前可用API key个数: {len(self.api_keys)} ")
        for i, api_key in enumerate(self.api_keys):
            logger.info(f"API Key{i}: {api_key[:11]}...")

    def blacklist_key(self, key):
        """Blacklist `key` and schedule its automatic release.

        NOTE(review): relies on the module-level `scheduler` created in the
        __main__ block — calling this before the scheduler exists raises
        NameError; confirm the call ordering.
        """
        logger.warning(f"{key[:11]} → 暂时禁用 {api_key_blacklist_duration} 秒")
        api_key_blacklist.add(key)

        scheduler.add_job(lambda: api_key_blacklist.discard(key), 'date', run_date=datetime.now() + timedelta(seconds=api_key_blacklist_duration))
90
+
91
# Build the key pool at import time and pick an initial key.
key_manager = APIKeyManager()
key_manager.show_all_keys()
current_api_key = key_manager.get_available_key()

def switch_api_key():
    """Advance the global current_api_key to the next usable key.

    Leaves the current key unchanged when no usable key remains.
    """
    global current_api_key
    key = key_manager.get_available_key()
    if key:
        current_api_key = key
        logger.info(f"API key 替换为 → {current_api_key[:11]}...")
    else:
        logger.error("API key 替换失败,所有API key都已耗尽或被暂时禁用,请重新配置或稍后重试")

# NOTE(review): if no key was available above, current_api_key is None and
# this slice raises TypeError at import time — confirm intended fail-fast.
logger.info(f"当前 API key: {current_api_key[:11]}...")
105
+
106
# Model catalogue returned by /hf/v1/models; also the set of ids clients are
# expected to send in the "model" field of chat requests.
GEMINI_MODELS = [
    {"id": "gemini-1.5-flash-8b-latest"},
    {"id": "gemini-1.5-flash-8b-exp-0924"},
    {"id": "gemini-1.5-flash-latest"},
    {"id": "gemini-1.5-flash-exp-0827"},
    {"id": "gemini-1.5-pro-latest"},
    {"id": "gemini-1.5-pro-exp-0827"},
    {"id": "learnlm-1.5-pro-experimental"},
    {"id": "gemini-exp-1114"},
    {"id": "gemini-exp-1121"},
    {"id": "gemini-exp-1206"},
    {"id": "gemini-2.0-flash-exp"},
    {"id": "gemini-2.0-flash-thinking-exp-1219"},
    {"id": "gemini-2.0-pro-exp"}
]
121
+
122
@app.route('/')
def index():
    """Serve a trivial HTML landing page (also doubles as a health check)."""
    page = """
    <!DOCTYPE html>
    <html>
    <head>
    <meta charset="utf-8">

    </head>
    <body>
    {{ main_content }}<br/><br/>测试
    </body>
    </html>
    """
    return render_template_string(page, main_content="test")
138
+
139
def is_within_rate_limit(api_key):
    """Check the per-key sliding-window rate limit.

    Returns (True, 0) when another request is allowed, otherwise
    (False, seconds_until_a_slot_frees_up).
    """
    now = datetime.now()
    timestamps = request_counts.setdefault(api_key, deque())

    # Drop timestamps that have aged out of the window.
    cutoff = now - timedelta(seconds=LIMIT_WINDOW)
    while timestamps and timestamps[0] < cutoff:
        timestamps.popleft()

    if len(timestamps) < MAX_REQUESTS:
        return True, 0

    # Window is full: the earliest request determines when a slot reopens.
    retry_at = timestamps[0] + timedelta(seconds=LIMIT_WINDOW)
    return False, (retry_at - now).total_seconds()
153
+
154
def increment_request_count(api_key):
    """Record one request timestamp for api_key in the sliding window."""
    request_counts.setdefault(api_key, deque()).append(datetime.now())
159
+
160
def handle_api_error(error, attempt):
    """Classify an exception raised by the Gemini SDK and decide what to do.

    Returns a (status, response) pair:
      0 -> transient; caller should retry (response is None, except when the
           retry budget is exhausted, where an error JSON body is returned)
      1 -> the user's prompt itself was blocked
      2 -> fatal for this model/request; caller should give up
    Side effects: may sleep (exponential backoff), blacklist the current
    key, and rotate to the next key via switch_api_key().
    """
    # Retry budget exhausted: hand back an error payload for the client.
    if attempt > MAX_RETRIES:
        logger.error(f"{MAX_RETRIES} 次尝试后仍然失败,请修改预设或输入")
        return 0, jsonify({
            'error': {
                'message': f"{MAX_RETRIES} 次尝试后仍然失败,请修改预设或输入",
                'type': 'max_retries_exceeded'
            }
        })

    if isinstance(error, InvalidArgument):
        # Key rejected (expired/deleted): pull it from rotation, retry on another.
        logger.error(f"{current_api_key[:11]} → 无效,可能已过期或被删除")
        key_manager.blacklist_key(current_api_key)
        switch_api_key()
        return 0, None

    elif isinstance(error, ResourceExhausted):
        # 429 quota exhausted: capped exponential backoff + key rotation.
        delay = min(RETRY_DELAY * (2 ** attempt), MAX_RETRY_DELAY)
        logger.warning(f"{current_api_key[:11]} → 429 官方资源耗尽 → {delay} 秒后重试...")
        key_manager.blacklist_key(current_api_key)
        switch_api_key()
        time.sleep(delay)
        return 0, None

    elif isinstance(error, Aborted):
        # Upstream aborted the operation: back off and retry with the same key.
        delay = min(RETRY_DELAY * (2 ** attempt), MAX_RETRY_DELAY)
        logger.warning(f"{current_api_key[:11]} → 操作被中止 → {delay} 秒后重试...")
        time.sleep(delay)
        return 0, None

    elif isinstance(error, InternalServerError):
        # 500 upstream: back off and retry with the same key.
        delay = min(RETRY_DELAY * (2 ** attempt), MAX_RETRY_DELAY)
        logger.warning(f"{current_api_key[:11]} → 500 服务器内部错误 → {delay} 秒后重试...")
        time.sleep(delay)
        return 0, None

    elif isinstance(error, ServiceUnavailable):
        # 503 upstream: back off and retry with the same key.
        delay = min(RETRY_DELAY * (2 ** attempt), MAX_RETRY_DELAY)
        logger.warning(f"{current_api_key[:11]} → 503 服务不可用 → {delay} 秒后重试...")
        time.sleep(delay)
        return 0, None

    elif isinstance(error, PermissionDenied):
        # 403: key is likely banned — remove from rotation and switch.
        logger.error(f"{current_api_key[:11]} → 403 权限被拒绝,该 API KEY 可能已经被官方封禁")
        key_manager.blacklist_key(current_api_key)
        switch_api_key()
        return 0, None

    elif isinstance(error, StopCandidateException):
        # The model produced no usable candidate; try again on another key.
        logger.warning(f"AI输出内容被Gemini官方阻挡,代理没有得到有效回复")
        switch_api_key()
        return 0, None

    elif isinstance(error, generation_types.BlockedPromptException):
        # Parse the "block_reason: X" token out of the exception's first arg
        # to log why the *prompt* (not the answer) was rejected.
        try:
            full_reason_str = str(error.args[0])

            if "block_reason:" in full_reason_str:
                start_index = full_reason_str.find("block_reason:") + len("block_reason:")
                block_reason_str = full_reason_str[start_index:].strip()

                if block_reason_str == "SAFETY":
                    logger.warning(f"用户输入因安全原因被阻止")
                    return 1, None
                elif block_reason_str == "BLOCKLIST":
                    logger.warning(f"用户输入因包含阻止列表中的术语而被阻止")
                    return 1, None
                elif block_reason_str == "PROHIBITED_CONTENT":
                    logger.warning(f"用户输入因包含禁止内容而被阻止")
                    return 1, None
                elif block_reason_str == "OTHER":
                    logger.warning(f"用户输入因未知原因被阻止")
                    return 1, None
                else:
                    logger.warning(f"用户输入被阻止,原因未知: {block_reason_str}")
                    return 1, None
            else:
                logger.warning(f"用户输入被阻止,原因未知: {full_reason_str}")
                return 1, None

        except (IndexError, AttributeError) as e:
            # Exception had no parseable args: treat as fatal.
            logger.error(f"获取提示原因失败↙\n{e}")
            logger.error(f"提示被阻止↙\n{error}")
            return 2, None

    else:
        # Anything unrecognized (e.g. unknown model name): give up.
        logger.error(f"该模型还未发布,暂时不可用,请更换模型或未来一段时间再试")
        logger.error(f"证明↙\n{error}")
        return 2, None
249
+
250
@app.route('/hf/v1/chat/completions', methods=['POST'])
def chat_completions():
    """OpenAI-compatible chat endpoint proxied onto Google Gemini.

    Authenticates the caller (Bearer password), converts OpenAI-style
    messages into Gemini history, then calls the Gemini SDK with retry and
    key-rotation logic. Supports streaming (SSE chunks) and non-streaming
    responses.
    """
    is_authenticated, auth_error, status_code = func.authenticate_request(request)
    if not is_authenticated:
        return auth_error if auth_error else jsonify({'error': '未授权'}), status_code if status_code else 401

    # OpenAI-style request payload with the project's defaults.
    request_data = request.get_json()
    messages = request_data.get('messages', [])
    model = request_data.get('model', 'gemini-2.0-flash-exp')
    temperature = request_data.get('temperature', 1)
    max_tokens = request_data.get('max_tokens', 8192)
    stream = request_data.get('stream', False)
    hint = "流式" if stream else "非流"
    logger.info(f"\n{model} [{hint}] → {current_api_key[:11]}...")

    # Convert OpenAI messages into Gemini history + the final user turn.
    gemini_history, user_message, error_response = func.process_messages_for_gemini(messages)

    if error_response:
        logger.error(f"处理输入消息时出错↙\n {error_response}")
        return jsonify(error_response), 400

    def do_request(current_api_key, attempt):
        # One attempt against Gemini. Returns (status, response) following
        # handle_api_error's protocol; status 1 means success here.
        # NOTE(review): the local name `time` below shadows the `time`
        # module inside this function — harmless today, but fragile.
        isok, time = is_within_rate_limit(current_api_key)
        if not isok:
            logger.warning(f"{current_api_key[:11]} → 暂时超过限额,该API key将在 {time} 秒后启用...")
            switch_api_key()
            return 0, None

        increment_request_count(current_api_key)

        # genai.configure sets the key process-wide for this attempt.
        genai.configure(api_key=current_api_key)

        generation_config = {
            "temperature": temperature,
            "max_output_tokens": max_tokens
        }

        gen_model = genai.GenerativeModel(
            model_name=model,
            generation_config=generation_config,
            safety_settings=safety_settings
        )

        try:
            # With prior turns, use a chat session; otherwise a single call.
            if gemini_history:
                chat_session = gen_model.start_chat(history=gemini_history)
                response = chat_session.send_message(user_message, stream=stream)
            else:
                response = gen_model.generate_content(user_message, stream=stream)
            return 1, response
        except Exception as e:
            return handle_api_error(e, attempt)

    def generate(response):
        # SSE generator: reshape Gemini stream chunks into OpenAI
        # "chat.completion.chunk" events, always ending with a stop chunk.
        try:
            logger.info(f"流式开始...")
            for chunk in response:
                if chunk.text:
                    data = {
                        'choices': [
                            {
                                'delta': {
                                    'content': chunk.text
                                },
                                'finish_reason': None,
                                'index': 0
                            }
                        ],
                        'object': 'chat.completion.chunk'
                    }
                    yield f"data: {json.dumps(data)}\n\n"

            # Normal end of stream: emit the terminating stop chunk.
            data = {
                'choices': [
                    {
                        'delta': {},
                        'finish_reason': 'stop',
                        'index': 0
                    }
                ],
                'object': 'chat.completion.chunk'
            }
            logger.info(f"流式结束")
            yield f"data: {json.dumps(data)}\n\n"
            logger.info(f"200!")

        except Exception:
            # Upstream stream broke mid-flight: emit an error event, then a
            # stop chunk so the client still terminates cleanly.
            logger.error(f"流式输出中途被截断,请关闭流式输出或修改你的输入")
            logger.info(f"流式结束")
            error_data = {
                'error': {
                    'message': '流式输出时截断,请关闭流式输出或修改你的输入',
                    'type': 'internal_server_error'
                }
            }
            yield f"data: {json.dumps(error_data)}\n\n"
            data = {
                'choices': [
                    {
                        'delta': {},
                        'finish_reason': 'stop',
                        'index': 0
                    }
                ],
                'object': 'chat.completion.chunk'
            }

            yield f"data: {json.dumps(data)}\n\n"

    attempt = 0
    success = 0
    response = None

    # Retry loop: rotate keys / back off via handle_api_error until success
    # (1), a fatal classification (2), or the retry budget runs out.
    for attempt in range(1, MAX_RETRIES + 1):
        logger.info(f"第 {attempt}/{MAX_RETRIES} 次尝试 ...")
        success, response = do_request(current_api_key, attempt)

        if success == 1:
            break
        elif success == 2:

            logger.error(f"{model} 很可能暂时不可用,请更换模型或未来一段时间再试")
            response = {
                'error': {
                    'message': f'{model} 很可能暂时不可用,请更换模型或未来一段时间再试',
                    'type': 'internal_server_error'
                }
            }
            return jsonify(response), 503

    else:
        # for/else: every attempt returned 0 (retryable failure).
        logger.error(f"{MAX_RETRIES} 次尝试均失败,请调整配置或向Moonfanz反馈")
        response = {
            'error': {
                'message': f'{MAX_RETRIES} 次尝试均失败,请调整配置或向Moonfanz反馈',
                'type': 'internal_server_error'
            }
        }
        # NOTE(review): `response` was just assigned a dict, so this
        # conditional always yields 500 — the 503 branch is dead.
        return jsonify(response), 500 if response is not None else 503

    if stream:
        return Response(stream_with_context(generate(response)), mimetype='text/event-stream')
    else:
        try:
            # Accessing .text raises when the SDK returned no candidates.
            text_content = response.text
        except (AttributeError, IndexError, TypeError, ValueError) as e:
            if "response.candidates is empty" in str(e):
                logger.error(f"你的输入被AI安全过滤器阻止")
                return jsonify({
                    'error': {
                        'message': '你的输入被AI安全过滤器阻止',
                        'type': 'prompt_blocked_error',
                        'details': str(e)
                    }
                }), 400
            else:
                logger.error(f"AI响应处理失败")
                return jsonify({
                    'error': {
                        'message': 'AI响应处理失败',
                        'type': 'response_processing_error'
                    }
                }), 500

        # OpenAI-shaped completion; token usage is not tracked (always 0).
        response_data = {
            'id': 'chatcmpl-xxxxxxxxxxxx',
            'object': 'chat.completion',
            'created': int(datetime.now().timestamp()),
            'model': model,
            'choices': [{
                'index': 0,
                'message': {
                    'role': 'assistant',
                    'content': text_content
                },
                'finish_reason': 'stop'
            }],
            'usage': {
                'prompt_tokens': 0,
                'completion_tokens': 0,
                'total_tokens': 0
            }
        }
        logger.info(f"200!")
        return jsonify(response_data)
435
+
436
@app.route('/hf/v1/models', methods=['GET'])
def list_models():
    """Expose the supported Gemini models in OpenAI's list format."""
    return jsonify({"object": "list", "data": GEMINI_MODELS})
440
+
441
def keep_alive():
    """Ping our own HTTP endpoint so the hosting platform sees activity."""
    try:
        resp = requests.get("http://127.0.0.1:7860/", timeout=10)
        resp.raise_for_status()
    except requests.exceptions.RequestException as exc:
        print(f"Keep alive ping failed: {exc} at {time.ctime()}")
    else:
        print(f"Keep alive ping successful: {resp.status_code} at {time.ctime()}")
448
+
449
if __name__ == '__main__':
    # Background jobs: self-ping every 12h; APIKeyManager.blacklist_key also
    # registers one-shot un-blacklist jobs on this same scheduler.
    scheduler = BackgroundScheduler()

    scheduler.add_job(keep_alive, 'interval', hours=12)
    scheduler.start()

    logger.info(f"最大尝试次数/MaxRetries: {MAX_RETRIES}")
    logger.info(f"最大请求次数/MaxRequests: {MAX_REQUESTS}")
    logger.info(f"请求限额窗口/LimitWindow: {LIMIT_WINDOW} 秒")

    # NOTE(review): debug=True enables the Werkzeug debugger/reloader —
    # confirm this is intentional for the deployment target.
    app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 7860)))
func.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Standard library
import base64
import hmac
import json
import logging
import os
import re
from io import BytesIO

# Third-party
import google.generativeai as genai
import requests
from PIL import Image
from flask import jsonify
11
logger = logging.getLogger(__name__)


# NOTE(review): never used in this module — request counting lives in app.py.
request_counts = {}

# Shared proxy password for Bearer auth; raises KeyError at import time when
# the 'password' env var is missing — presumably a deliberate fail-fast.
password = os.environ['password']
17
+
18
def authenticate_request(request):
    """Validate the Bearer password on an incoming Flask request.

    Returns (is_authenticated, error_response, status_code); the last two
    are None on success.
    """
    auth_header = request.headers.get('Authorization')

    if not auth_header:
        return False, jsonify({'error': '缺少Authorization请求头'}), 401

    try:
        auth_type, pass_word = auth_header.split(' ', 1)
    except ValueError:
        return False, jsonify({'error': 'Authorization请求头格式错误'}), 401

    if auth_type.lower() != 'bearer':
        return False, jsonify({'error': 'Authorization类型必须为Bearer'}), 401

    # Constant-time comparison so untrusted callers cannot recover the
    # password byte-by-byte via response-timing differences.
    if not hmac.compare_digest(pass_word, password):
        return False, jsonify({'error': '未授权'}), 401

    return True, None, None
36
+
37
def process_messages_for_gemini(messages):
    """Convert OpenAI-style chat messages into Gemini history format.

    Returns (gemini_history, user_message, error_response): the last turn is
    split off as `user_message`; `error_response` is a (json, 400) tuple when
    any message was malformed, else None. 'system' turns are mapped to the
    Gemini 'user' role, 'assistant' to 'model'. Unknown content types are
    silently skipped.
    """
    gemini_history = []
    errors = []

    def inline_part(uri):
        # Split "data:<mime>;base64,<payload>" into a Gemini inline_data part;
        # raises IndexError/ValueError on a malformed data URI.
        mime = uri.split(';')[0].split(':')[1]
        payload = uri.split(',')[1]
        return {"inline_data": {"mime_type": mime, "data": payload}}

    for message in messages:
        role = message.get('role')
        content = message.get('content')

        if isinstance(content, str):
            if role in ('system', 'user'):
                gemini_history.append({"role": "user", "parts": [content]})
            elif role == 'assistant':
                gemini_history.append({"role": "model", "parts": [content]})
            else:
                errors.append(f"Invalid role: {role}")

        elif isinstance(content, list):
            parts = []
            for item in content:
                kind = item.get('type')
                if kind == 'text':
                    parts.append({"text": item.get('text')})
                elif kind == 'image_url':
                    image_data = item.get('image_url', {}).get('url', '')
                    if image_data.startswith('data:image/'):
                        try:
                            parts.append(inline_part(image_data))
                        except (IndexError, ValueError):
                            errors.append(f"Invalid data URI for image: {image_data}")
                    else:
                        errors.append(f"Invalid image URL format for item: {item}")
                elif kind == 'file_url':
                    file_data = item.get('file_url', {}).get('url', '')
                    if file_data.startswith('data:'):
                        try:
                            parts.append(inline_part(file_data))
                        except (IndexError, ValueError):
                            errors.append(f"Invalid data URI for file: {file_data}")
                    else:
                        errors.append(f"Invalid file URL format for item: {item}")

            if parts:
                if role in ['user', 'system']:
                    gemini_history.append({"role": "user", "parts": parts})
                elif role in ['assistant']:
                    gemini_history.append({"role": "model", "parts": parts})
                else:
                    errors.append(f"Invalid role: {role}")

    # Split the final turn off as the message to send; keep the rest as history.
    if gemini_history:
        user_message = gemini_history.pop()
    else:
        user_message = {"role": "user", "parts": [""]}

    if errors:
        return gemini_history, user_message, (jsonify({'error': errors}), 400)
    return gemini_history, user_message, None