Update app.py
app.py CHANGED
@@ -263,12 +263,26 @@ async def chat_completion(request: ChatRequest, authorization: str = Header(None
             return response.model_dump()
         else:
             gemini_messages = convert_messages_to_gemini_format(request.messages)
-            payload = {
-                "contents": gemini_messages,
-                "generationConfig": {
-                    "temperature": request.temperature,
-                }
-            }
+            payload = {}
+            if request.model == "gemini-2.0-flash-exp-search":
+                payload = {
+                    "contents": gemini_messages,
+                    "generationConfig": {
+                        "temperature": request.temperature,
+                    },
+                    "tools": [
+                        {
+                            "google_search": {}
+                        }
+                    ]
+                }
+            else:
+                payload = {
+                    "contents": gemini_messages,
+                    "generationConfig": {
+                        "temperature": request.temperature,
+                    }
+                }
 
             if request.stream:
                 logger.info("Streaming response enabled")
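In short, the change makes the Gemini request payload conditional on the model name: when the client selects the gemini-2.0-flash-exp-search alias, the request additionally carries the google_search grounding tool; otherwise only contents and generationConfig are sent, as before. Below is a minimal sketch of how such a payload could be posted to the Gemini generateContent REST endpoint. The endpoint URL, the use of requests, the GEMINI_API_KEY environment variable, the alias-stripping step, and the build_payload/generate_content helper names are illustrative assumptions, not code from app.py; the two near-identical dicts in the diff are also collapsed into one conditional here for brevity.

# Minimal sketch (not from app.py): sending the conditional payload to the public
# Gemini generateContent REST endpoint. Endpoint URL, `requests`, the GEMINI_API_KEY
# variable, and the alias handling below are assumptions for illustration.
import os

import requests

GEMINI_API_BASE = "https://generativelanguage.googleapis.com/v1beta/models"


def build_payload(model: str, gemini_messages: list, temperature: float) -> dict:
    # Mirrors the diff's logic: the google_search grounding tool is attached only
    # for the "-search" model alias; the duplicated dict is collapsed into one branch.
    payload = {
        "contents": gemini_messages,
        "generationConfig": {"temperature": temperature},
    }
    if model == "gemini-2.0-flash-exp-search":
        payload["tools"] = [{"google_search": {}}]
    return payload


def generate_content(model: str, gemini_messages: list, temperature: float) -> dict:
    # Assumption: the "-search" suffix is an app-level alias, so the underlying
    # Gemini model id is used in the request URL.
    api_model = model.removesuffix("-search")
    url = f"{GEMINI_API_BASE}/{api_model}:generateContent"
    response = requests.post(
        url,
        params={"key": os.environ["GEMINI_API_KEY"]},
        json=build_payload(model, gemini_messages, temperature),
        timeout=60,
    )
    response.raise_for_status()
    return response.json()

With this shape, a non-streaming call would look like generate_content("gemini-2.0-flash-exp-search", gemini_messages, 0.7); the streaming branch guarded by request.stream in the diff would presumably target the streamGenerateContent endpoint instead, but that part is not shown in this hunk.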