Niansuh committed
Commit fc2a470 · verified · 1 Parent(s): 91ef7d9

Update api/utils.py

Files changed (1)
  1. api/utils.py +22 -24
api/utils.py CHANGED
@@ -14,7 +14,6 @@ from api.config import (
     AGENT_MODE,
     TRENDING_AGENT_MODE,
     MODEL_PREFIXES,
-    MODEL_REFERERS
 )
 from api.models import ChatRequest
 from api.logger import setup_logger
@@ -46,7 +45,11 @@ def message_to_dict(message, model_prefix: Optional[str] = None):
     content = message.content if isinstance(message.content, str) else message.content[0]["text"]
     if model_prefix:
         content = f"{model_prefix} {content}"
-    if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
+    if (
+        isinstance(message.content, list)
+        and len(message.content) == 2
+        and "image_url" in message.content[1]
+    ):
         # Ensure base64 images are always included for all models
         return {
             "role": message.role,
@@ -67,7 +70,7 @@ def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
         return content[len(model_prefix):].strip()
     return content
 
-# Process streaming response without header functions
+# Process streaming response without headers
 async def process_streaming_response(request: ChatRequest):
     # Generate a unique ID for this request
     request_id = f"chatcmpl-{uuid.uuid4()}"
@@ -77,12 +80,11 @@ async def process_streaming_response(request: ChatRequest):
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
     model_prefix = MODEL_PREFIXES.get(request.model, "")
 
-    # Define headers directly
-    headers_api_chat = {**common_headers, 'Content-Type': 'application/json'}
-
     if request.model == 'o1-preview':
         delay_seconds = random.randint(1, 60)
-        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})")
+        logger.info(
+            f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})"
+        )
         await asyncio.sleep(delay_seconds)
 
     # Fetch the h-value for the 'validated' field
@@ -102,7 +104,9 @@ async def process_streaming_response(request: ChatRequest):
         "isChromeExt": False,
         "isMicMode": False,
         "maxTokens": request.max_tokens,
-        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
+        "messages": [
+            message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
+        ],
         "mobileClient": False,
         "playgroundTemperature": request.temperature,
         "playgroundTopP": request.top_p,
@@ -120,7 +124,6 @@ async def process_streaming_response(request: ChatRequest):
             async with client.stream(
                 "POST",
                 f"{BASE_URL}/api/chat",
-                headers=headers_api_chat,
                 json=json_data,
                 timeout=100,
             ) as response:
@@ -143,7 +146,7 @@ async def process_streaming_response(request: ChatRequest):
             logger.error(f"Error occurred during request for Request ID {request_id}: {e}")
             raise HTTPException(status_code=500, detail=str(e))
 
-# Process non-streaming response without header functions
+# Process non-streaming response without headers
 async def process_non_streaming_response(request: ChatRequest):
     # Generate a unique ID for this request
     request_id = f"chatcmpl-{uuid.uuid4()}"
@@ -153,20 +156,11 @@ async def process_non_streaming_response(request: ChatRequest):
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
     model_prefix = MODEL_PREFIXES.get(request.model, "")
 
-    # Define headers directly
-    headers_api_chat = {**common_headers, 'Content-Type': 'application/json'}
-    headers_chat = {
-        **common_headers,
-        'Accept': 'text/x-component',
-        'Content-Type': 'text/plain;charset=UTF-8',
-        'next-action': str(uuid.uuid4()),
-        'next-router-state-tree': json.dumps([""]),
-        'next-url': '/',
-    }
-
     if request.model == 'o1-preview':
         delay_seconds = random.randint(20, 60)
-        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})")
+        logger.info(
+            f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})"
+        )
         await asyncio.sleep(delay_seconds)
 
     # Fetch the h-value for the 'validated' field
@@ -186,7 +180,9 @@ async def process_non_streaming_response(request: ChatRequest):
         "isChromeExt": False,
         "isMicMode": False,
         "maxTokens": request.max_tokens,
-        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
+        "messages": [
+            message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
+        ],
         "mobileClient": False,
         "playgroundTemperature": request.temperature,
         "playgroundTopP": request.top_p,
@@ -203,7 +199,9 @@ async def process_non_streaming_response(request: ChatRequest):
     async with httpx.AsyncClient() as client:
         try:
             async with client.stream(
-                method="POST", url=f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data
+                method="POST",
+                url=f"{BASE_URL}/api/chat",
+                json=json_data,
             ) as response:
                 response.raise_for_status()
                 async for chunk in response.aiter_text():
 
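One note on the header removal itself: httpx sets Content-Type: application/json on its own whenever json= is passed, which is presumably why the explicit headers_api_chat could be dropped from both request paths without changing the wire format. If shared headers were ever needed again, they could be set once on the client rather than per call. A minimal sketch, with BASE_URL stubbed in place of the value imported from api.config:

```python
import httpx

BASE_URL = "https://example.invalid"  # placeholder; the real value comes from api.config

async def post_chat(json_data: dict) -> None:
    # Headers set once on the client apply to every request it makes;
    # httpx still adds Content-Type: application/json for json=... bodies.
    async with httpx.AsyncClient(headers={"Accept": "*/*"}) as client:
        async with client.stream(
            "POST", f"{BASE_URL}/api/chat", json=json_data, timeout=100
        ) as response:
            response.raise_for_status()
            async for chunk in response.aiter_text():
                print(chunk, end="")
```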