Niansuh committed on
Commit
93e2cb7
·
verified ·
1 Parent(s): fd78367

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +230 -233
api/utils.py CHANGED
@@ -1,233 +1,230 @@
1
- # main.py or your main application file
2
-
3
- from datetime import datetime
4
- import json
5
- import uuid
6
- import asyncio
7
- import random
8
- import string
9
- from typing import Any, Dict, Optional
10
-
11
- import httpx
12
- from fastapi import HTTPException
13
- from api.config import (
14
- MODEL_MAPPING,
15
- get_headers_api_chat,
16
- get_headers_chat,
17
- BASE_URL,
18
- AGENT_MODE,
19
- TRENDING_AGENT_MODE,
20
- MODEL_PREFIXES,
21
- MODEL_REFERERS
22
- )
23
- from api.models import ChatRequest
24
- from api.logger import setup_logger
25
- from api.validate import getHid # Import the asynchronous getHid function
26
-
27
- logger = setup_logger(__name__)
28
-
29
- # Helper function to create chat completion data
30
- def create_chat_completion_data(
31
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
32
- ) -> Dict[str, Any]:
33
- return {
34
- "id": f"chatcmpl-{uuid.uuid4()}",
35
- "object": "chat.completion.chunk",
36
- "created": timestamp,
37
- "model": model,
38
- "choices": [
39
- {
40
- "index": 0,
41
- "delta": {"content": content, "role": "assistant"},
42
- "finish_reason": finish_reason,
43
- }
44
- ],
45
- "usage": None,
46
- }
47
-
48
- # Function to convert message to dictionary format, ensuring base64 data and optional model prefix
49
- def message_to_dict(message, model_prefix: Optional[str] = None):
50
- content = message.content if isinstance(message.content, str) else message.content[0]["text"]
51
- if model_prefix:
52
- content = f"{model_prefix} {content}"
53
- if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
54
- # Ensure base64 images are always included for all models
55
- return {
56
- "role": message.role,
57
- "content": content,
58
- "data": {
59
- "imageBase64": message.content[1]["image_url"]["url"],
60
- "fileText": "",
61
- "title": "snapshot",
62
- },
63
- }
64
- return {"role": message.role, "content": content}
65
-
66
- # Function to strip model prefix from content if present
67
- def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
68
- """Remove the model prefix from the response content if present."""
69
- if model_prefix and content.startswith(model_prefix):
70
- logger.debug(f"Stripping prefix '{model_prefix}' from content.")
71
- return content[len(model_prefix):].strip()
72
- return content
73
-
74
- # Process streaming response with headers from config.py
75
- async def process_streaming_response(request: ChatRequest):
76
- # Generate a unique ID for this request
77
- request_id = f"chatcmpl-{uuid.uuid4()}"
78
- logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")
79
-
80
- agent_mode = AGENT_MODE.get(request.model, {})
81
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
82
- model_prefix = MODEL_PREFIXES.get(request.model, "")
83
-
84
- # Adjust headers_api_chat since referer_url is removed
85
- headers_api_chat = get_headers_api_chat(BASE_URL)
86
-
87
- if request.model == 'o1-preview':
88
- delay_seconds = random.randint(1, 60)
89
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})")
90
- await asyncio.sleep(delay_seconds)
91
-
92
- # Fetch the h-value for the 'validated' field
93
- h_value = await getHid()
94
- if not h_value:
95
- logger.error("Failed to retrieve h-value for validation.")
96
- raise HTTPException(status_code=500, detail="Validation failed due to missing h-value.")
97
-
98
- json_data = {
99
- "agentMode": agent_mode,
100
- "clickedAnswer2": False,
101
- "clickedAnswer3": False,
102
- "clickedForceWebSearch": False,
103
- "codeModelMode": True,
104
- "githubToken": None,
105
- "id": None, # Using request_id instead of chat_id
106
- "isChromeExt": False,
107
- "isMicMode": False,
108
- "maxTokens": request.max_tokens,
109
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
110
- "mobileClient": False,
111
- "playgroundTemperature": request.temperature,
112
- "playgroundTopP": request.top_p,
113
- "previewToken": None,
114
- "trendingAgentMode": trending_agent_mode,
115
- "userId": None,
116
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
117
- "userSystemPrompt": None,
118
- "validated": h_value, # Dynamically set the validated field
119
- "visitFromDelta": False,
120
- }
121
-
122
- async with httpx.AsyncClient() as client:
123
- try:
124
- async with client.stream(
125
- "POST",
126
- f"{BASE_URL}/api/chat",
127
- headers=headers_api_chat,
128
- json=json_data,
129
- timeout=100,
130
- ) as response:
131
- response.raise_for_status()
132
- async for line in response.aiter_lines():
133
- timestamp = int(datetime.now().timestamp())
134
- if line:
135
- content = line
136
- if content.startswith("$@$v=undefined-rv1$@$"):
137
- content = content[21:]
138
- cleaned_content = strip_model_prefix(content, model_prefix)
139
- yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
140
-
141
- yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
142
- yield "data: [DONE]\n\n"
143
- except httpx.HTTPStatusError as e:
144
- logger.error(f"HTTP error occurred for Request ID {request_id}: {e}")
145
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
146
- except httpx.RequestError as e:
147
- logger.error(f"Error occurred during request for Request ID {request_id}: {e}")
148
- raise HTTPException(status_code=500, detail=str(e))
149
-
150
- # Process non-streaming response with headers from config.py
151
- async def process_non_streaming_response(request: ChatRequest):
152
- # Generate a unique ID for this request
153
- request_id = f"chatcmpl-{uuid.uuid4()}"
154
- logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")
155
-
156
- agent_mode = AGENT_MODE.get(request.model, {})
157
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
158
- model_prefix = MODEL_PREFIXES.get(request.model, "")
159
-
160
- # Adjust headers_api_chat and headers_chat since referer_url is removed
161
- headers_api_chat = get_headers_api_chat(BASE_URL)
162
- headers_chat = get_headers_chat(BASE_URL, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
163
-
164
- if request.model == 'o1-preview':
165
- delay_seconds = random.randint(20, 60)
166
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})")
167
- await asyncio.sleep(delay_seconds)
168
-
169
- # Fetch the h-value for the 'validated' field
170
- h_value = await getHid()
171
- if not h_value:
172
- logger.error("Failed to retrieve h-value for validation.")
173
- raise HTTPException(status_code=500, detail="Validation failed due to missing h-value.")
174
-
175
- json_data = {
176
- "agentMode": agent_mode,
177
- "clickedAnswer2": False,
178
- "clickedAnswer3": False,
179
- "clickedForceWebSearch": False,
180
- "codeModelMode": True,
181
- "githubToken": None,
182
- "id": None, # Using request_id instead of chat_id
183
- "isChromeExt": False,
184
- "isMicMode": False,
185
- "maxTokens": request.max_tokens,
186
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
187
- "mobileClient": False,
188
- "playgroundTemperature": request.temperature,
189
- "playgroundTopP": request.top_p,
190
- "previewToken": None,
191
- "trendingAgentMode": trending_agent_mode,
192
- "userId": None,
193
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
194
- "userSystemPrompt": None,
195
- "validated": h_value, # Dynamically set the validated field
196
- "visitFromDelta": False,
197
- }
198
-
199
- full_response = ""
200
- async with httpx.AsyncClient() as client:
201
- try:
202
- async with client.stream(
203
- method="POST", url=f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data
204
- ) as response:
205
- response.raise_for_status()
206
- async for chunk in response.aiter_text():
207
- full_response += chunk
208
- except httpx.HTTPStatusError as e:
209
- logger.error(f"HTTP error occurred for Request ID {request_id}: {e}")
210
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
211
- except httpx.RequestError as e:
212
- logger.error(f"Error occurred during request for Request ID {request_id}: {e}")
213
- raise HTTPException(status_code=500, detail=str(e))
214
-
215
- if full_response.startswith("$@$v=undefined-rv1$@$"):
216
- full_response = full_response[21:]
217
-
218
- cleaned_full_response = strip_model_prefix(full_response, model_prefix)
219
-
220
- return {
221
- "id": f"chatcmpl-{uuid.uuid4()}",
222
- "object": "chat.completion",
223
- "created": int(datetime.now().timestamp()),
224
- "model": request.model,
225
- "choices": [
226
- {
227
- "index": 0,
228
- "message": {"role": "assistant", "content": cleaned_full_response},
229
- "finish_reason": "stop",
230
- }
231
- ],
232
- "usage": None,
233
- }
 
1
+ from datetime import datetime
2
+ import json
3
+ import uuid
4
+ import asyncio
5
+ import random
6
+ from typing import Any, Dict, Optional
7
+
8
+ import httpx
9
+ from fastapi import HTTPException
10
+ from api.config import (
11
+ MODEL_MAPPING,
12
+ get_headers_api_chat,
13
+ get_headers_chat,
14
+ BASE_URL,
15
+ AGENT_MODE,
16
+ TRENDING_AGENT_MODE,
17
+ MODEL_PREFIXES,
18
+ MODEL_REFERERS
19
+ )
20
+ from api.models import ChatRequest
21
+ from api.logger import setup_logger
22
+ from api.validate import getHid # Import the asynchronous getHid function
23
+
24
+ logger = setup_logger(__name__)
25
+
26
# Build a single OpenAI-style streaming chunk payload.
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Return one ``chat.completion.chunk`` dict for SSE streaming.

    Args:
        content: Delta text carried by this chunk.
        model: Model name echoed back to the client.
        timestamp: Unix timestamp (seconds) for the ``created`` field.
        finish_reason: ``None`` for intermediate chunks, ``"stop"`` for the
            final one.
    """
    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": None,
    }
44
+
45
# Convert an incoming message object into the upstream payload dict,
# optionally prepending a model prefix and attaching base64 image data.
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Serialize *message* for the upstream API.

    Text is taken from ``message.content`` directly (str) or from the first
    list element's ``"text"`` field. When the content is a two-element list
    whose second element carries an ``"image_url"``, the base64 payload is
    forwarded under ``"data"`` so images are always included.
    """
    raw = message.content
    text = raw if isinstance(raw, str) else raw[0]["text"]
    if model_prefix:
        text = f"{model_prefix} {text}"

    has_image = isinstance(raw, list) and len(raw) == 2 and "image_url" in raw[1]
    if has_image:
        # Ensure base64 images are always included for all models
        return {
            "role": message.role,
            "content": text,
            "data": {
                "imageBase64": raw[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    return {"role": message.role, "content": text}
62
+
63
# Strip a leading model prefix from response text, if one was applied.
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Remove the model prefix from the response content if present."""
    if not model_prefix or not content.startswith(model_prefix):
        return content
    logger.debug(f"Stripping prefix '{model_prefix}' from content.")
    return content[len(model_prefix):].strip()
70
+
71
# Process streaming response with headers from config.py
async def process_streaming_response(request: ChatRequest):
    """Stream the upstream /api/chat response as OpenAI-style SSE chunks.

    Yields ``"data: {...}\\n\\n"`` strings for each upstream line, then a
    final ``"stop"`` chunk followed by ``"data: [DONE]\\n\\n"``.

    Raises:
        HTTPException: 500 when the h-value cannot be retrieved or the
            upstream request fails; the upstream status code on HTTP errors.
    """
    # Generate a unique ID for this request
    request_id = f"chatcmpl-{uuid.uuid4()}"
    logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")

    # Adjust headers_api_chat since referer_url is removed
    headers_api_chat = get_headers_api_chat()

    if request.model == 'o1-preview':
        delay_seconds = random.randint(1, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})")
        await asyncio.sleep(delay_seconds)

    # Fetch the h-value for the 'validated' field
    h_value = await getHid()
    if not h_value:
        logger.error("Failed to retrieve h-value for validation.")
        raise HTTPException(status_code=500, detail="Validation failed due to missing h-value.")

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        "id": None,  # Using request_id instead of chat_id
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": h_value,  # Dynamically set the validated field
        "visitFromDelta": False,
    }

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                # BUGFIX: bind timestamp before the loop — previously it was
                # only assigned inside the loop, so an empty upstream stream
                # made the final "stop" chunk raise NameError.
                timestamp = int(datetime.now().timestamp())
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        content = line
                        # Drop the upstream sentinel prefix (21 chars).
                        if content.startswith("$@$v=undefined-rv1$@$"):
                            content = content[21:]
                        cleaned_content = strip_model_prefix(content, model_prefix)
                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"

                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Request ID {request_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Request ID {request_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))
146
+
147
# Process non-streaming response with headers from config.py
async def process_non_streaming_response(request: ChatRequest):
    """Collect the full upstream /api/chat response and return a single
    OpenAI-style ``chat.completion`` dict.

    Raises:
        HTTPException: 500 when the h-value cannot be retrieved or the
            upstream request fails; the upstream status code on HTTP errors.
    """
    # Generate a unique ID for this request
    request_id = f"chatcmpl-{uuid.uuid4()}"
    logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")

    # Only the API-chat headers are needed here; the previously built
    # headers_chat value was never used, so it has been dropped.
    headers_api_chat = get_headers_api_chat()

    if request.model == 'o1-preview':
        delay_seconds = random.randint(20, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})")
        await asyncio.sleep(delay_seconds)

    # Fetch the h-value for the 'validated' field
    h_value = await getHid()
    if not h_value:
        logger.error("Failed to retrieve h-value for validation.")
        raise HTTPException(status_code=500, detail="Validation failed due to missing h-value.")

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        "id": None,  # Using request_id instead of chat_id
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": h_value,  # Dynamically set the validated field
        "visitFromDelta": False,
    }

    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                # Match the streaming path's timeout; httpx's 5 s default is
                # too short for slow completions.
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Request ID {request_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Request ID {request_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    # Drop the upstream sentinel prefix (21 chars) if present.
    if full_response.startswith("$@$v=undefined-rv1$@$"):
        full_response = full_response[21:]

    cleaned_full_response = strip_model_prefix(full_response, model_prefix)

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }