from datetime import datetime
import json
import uuid
import asyncio
import random
from typing import Any, Dict, Optional

import httpx
from fastapi import HTTPException
from api.config import (
    MODEL_MAPPING,
    get_headers_api_chat,
    get_headers_chat,
    BASE_URL,
    AGENT_MODE,
    TRENDING_AGENT_MODE,
    MODEL_PREFIXES,
)
from api.models import ChatRequest
from api.logger import setup_logger
from api.validate import getHid  # async helper that fetches the h-value used for request validation

logger = setup_logger(__name__)

# Define the blocked message
BLOCKED_MESSAGE = "Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai"

# Helper function to create chat completion data
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [
            {
                "index": 0,
                "delta": {"content": content, "role": "assistant"},
                "finish_reason": finish_reason,
            }
        ],
        "usage": None,
    }

# Function to convert a message to the dict format expected by the upstream API, attaching base64 image data and an optional model prefix
def message_to_dict(message, model_prefix: Optional[str] = None):
    content = message.content if isinstance(message.content, str) else message.content[0]["text"]
    if model_prefix:
        content = f"{model_prefix} {content}"
    if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
        # Ensure base64 images are always included for all models
        return {
            "role": message.role,
            "content": content,
            "data": {
                "imageBase64": message.content[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    return {"role": message.role, "content": content}

# Function to strip model prefix from content if present
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Remove the model prefix from the response content if present."""
    if model_prefix and content.startswith(model_prefix):
        logger.debug(f"Stripping prefix '{model_prefix}' from content.")
        return content[len(model_prefix):].strip()
    return content

# Helper function to build JSON data for the request
def build_json_data(request: ChatRequest, h_value: str, model_prefix: Optional[str]):
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    return {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        "id": None,
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": "00f37b34-a166-4efb-bce5-1312d87f2f94",
        "visitFromDelta": False,
    }

# Process streaming response with headers from config.py
async def process_streaming_response(request: ChatRequest):
    logger.info(f"Processing request - Model: {request.model}")

    model_prefix = MODEL_PREFIXES.get(request.model, "")

    # Build the chat API headers from config (referer_url is no longer passed)
    headers_api_chat = get_headers_api_chat(BASE_URL)

    if request.model == 'o1-preview':
        delay_seconds = random.randint(1, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
        await asyncio.sleep(delay_seconds)

    # Fetch the h-value for the 'validated' field
    h_value = await getHid()
    if not h_value:
        logger.error("Failed to retrieve h-value for validation.")
        raise HTTPException(status_code=500, detail="Validation failed due to missing h-value.")

    json_data = build_json_data(request, h_value, model_prefix)

    # Initialize rolling buffer to handle BLOCKED_MESSAGE split across chunks
    rolling_buffer = ""

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    if not chunk:
                        continue

                    # Combine the held-back tail with the current chunk so a
                    # BLOCKED_MESSAGE split across chunk boundaries is still caught
                    combined_chunk = rolling_buffer + chunk

                    # Remove any occurrence of BLOCKED_MESSAGE in combined_chunk
                    if BLOCKED_MESSAGE in combined_chunk:
                        logger.info("Blocked message detected in response.")
                        combined_chunk = combined_chunk.replace(BLOCKED_MESSAGE, '')

                    # Hold back the last len(BLOCKED_MESSAGE) - 1 characters, which
                    # could be the start of a blocked message that finishes in the
                    # next chunk; yielding them now and again later would duplicate
                    # output, so only the text before the tail is emitted here
                    tail_length = len(BLOCKED_MESSAGE) - 1
                    if len(combined_chunk) > tail_length:
                        safe_content = combined_chunk[:-tail_length]
                    else:
                        safe_content = ""
                    rolling_buffer = combined_chunk[len(safe_content):]

                    if safe_content:
                        # Remove model prefix if present and yield the cleaned content
                        cleaned_content = strip_model_prefix(safe_content, model_prefix)
                        timestamp = int(datetime.now().timestamp())
                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"

                # After streaming is done, check if any remaining content is in the rolling buffer
                if rolling_buffer:
                    # Remove any occurrence of BLOCKED_MESSAGE in rolling buffer
                    if BLOCKED_MESSAGE in rolling_buffer:
                        logger.info("Blocked message detected in remaining buffer.")
                        rolling_buffer = rolling_buffer.replace(BLOCKED_MESSAGE, '')

                    cleaned_content = strip_model_prefix(rolling_buffer, model_prefix)
                    timestamp = int(datetime.now().timestamp())
                    yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"

                # Signal the end of the streaming
                timestamp = int(datetime.now().timestamp())
                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
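
# Sketch: one way a FastAPI route could expose the streaming generator above.
# This router is illustrative only and is not registered with any app in this
# module; the project's real route wiring may differ, and the endpoint path
# below is an assumed example, not taken from this repository.
from fastapi import APIRouter
from fastapi.responses import StreamingResponse

example_router = APIRouter()

@example_router.post("/v1/chat/completions/stream")
async def example_stream_chat(request: ChatRequest):
    # Forward each SSE-formatted line from process_streaming_response to the
    # client as it is produced, using the server-sent events media type.
    return StreamingResponse(
        process_streaming_response(request),
        media_type="text/event-stream",
    )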

# Process non-streaming response with headers from config.py
async def process_non_streaming_response(request: ChatRequest):
    logger.info(f"Processing request - Model: {request.model}")

    model_prefix = MODEL_PREFIXES.get(request.model, "")

    # Build the API and chat-page headers from config (referer_url is no longer passed)
    headers_api_chat = get_headers_api_chat(BASE_URL)
    headers_chat = get_headers_chat(
        BASE_URL,
        next_action=str(uuid.uuid4()),
        next_router_state_tree=json.dumps([""])
    )

    if request.model == 'o1-preview':
        delay_seconds = random.randint(20, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
        await asyncio.sleep(delay_seconds)

    # Fetch the h-value for the 'validated' field
    h_value = await getHid()
    if not h_value:
        logger.error("Failed to retrieve h-value for validation.")
        raise HTTPException(status_code=500, detail="Validation failed due to missing h-value.")

    json_data = build_json_data(request, h_value, model_prefix)

    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    # Strip the "$@$v=undefined-rv1$@$" marker that some responses are prefixed with
    marker = "$@$v=undefined-rv1$@$"
    if full_response.startswith(marker):
        full_response = full_response[len(marker):]

    # Remove the blocked message if present
    if BLOCKED_MESSAGE in full_response:
        logger.info("Blocked message detected in response.")
        full_response = full_response.replace(BLOCKED_MESSAGE, '').strip()
        if not full_response:
            raise HTTPException(status_code=500, detail="Blocked message detected in response.")

    cleaned_full_response = strip_model_prefix(full_response, model_prefix)

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }
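
# Minimal local sanity check (a sketch, not part of the API surface). Running
# this module directly exercises the pure helpers above with hypothetical
# inputs: the SimpleNamespace message is a stand-in for the real Pydantic
# message type from api.models, and the "@GPT-4o:" prefix is an assumed
# example value, not one taken from MODEL_PREFIXES.
if __name__ == "__main__":
    from types import SimpleNamespace

    # A hypothetical message object exposing the attributes message_to_dict reads.
    fake_message = SimpleNamespace(role="user", content="Hello there")
    print(message_to_dict(fake_message, model_prefix="@GPT-4o:"))

    # strip_model_prefix should undo the prefix that message_to_dict applied.
    assert strip_model_prefix("@GPT-4o: Hello there", "@GPT-4o:") == "Hello there"

    # One streaming chunk in the OpenAI-compatible chat.completion.chunk shape.
    chunk = create_chat_completion_data("Hello", "gpt-4o", int(datetime.now().timestamp()))
    print(f"data: {json.dumps(chunk)}\n")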