sandz7 committed on
Commit
0e1a0ae
·
1 Parent(s): dd8c358

removed async

Browse files
Files changed (1) hide show
  1. app.py +36 -36
app.py CHANGED
@@ -9,7 +9,7 @@ import openai
9
  import os
10
  import spaces
11
  import base64
12
- import anyio
13
 
14
  # Setup logging
15
  logging.basicConfig(level=logging.DEBUG)
@@ -229,7 +229,7 @@ def check_cuda_availability():
229
  image_created = {}
230
 
231
  @spaces.GPU(duration=120)
232
- async def bot_comms(message, history):
233
  """
234
  Handles communication between Gradio and the models.
235
  """
@@ -246,45 +246,45 @@ async def bot_comms(message, history):
246
  gpt_outputs = []
247
  stream = generation(message, history)
248
 
249
- try:
250
- async for chunk in stream:
251
- if chunk is not None:
252
- if hasattr(chunk, 'choices') and chunk.choices:
253
- delta_content = chunk.choices[0].delta.get("content", "")
254
- if delta_content:
255
- gpt_outputs.append(delta_content)
256
- yield "".join(gpt_outputs)
257
- elif hasattr(chunk, 'image_url'):
258
- # Process image response
259
- image_url = chunk.image_url.get("url", "")
260
- gpt_outputs.append(f"[Image: {image_url}]")
261
- yield "".join(gpt_outputs)
262
- else:
263
- logger.debug("Chunk does not contain expected attributes.")
264
- else:
265
- logger.debug("Chunk is None.")
266
- except StopAsyncIteration:
267
- pass
268
  # gpt_outputs = []
269
  # # if mode == "chatting" or mode == "":
270
  # # logger.debug("On chatting or no mode.\n\n")
271
  # stream = generation(message, history)
272
  # logger.debug(f"\n\nOn chat mode for bot_comms right now.\n\n")
273
- # for chunk in stream:
274
- # if chunk is not None and hasattr(chunk, 'choices') and chunk.choices:
275
- # delta_content = chunk.choices[0].delta.get("content", "")
276
- # if delta_content:
277
- # gpt_outputs.append(delta_content)
278
- # yield "".join(gpt_outputs)
279
-
280
- # else:
281
- # logger.debug("Chunk does not contain 'choices' attribute or is None.")
282
- # # logger.debug("\n\nFound the chunk in stream for gpt-3.5\n\n")
283
- # # text = chunk.choices[0].delta.content
284
- # # if text:
285
- # # gpt_outputs.append(text)
286
- # # buffer += text
287
- # # yield "".join(gpt_outputs)
288
 
289
  chat_input = gr.MultimodalTextbox(interactive=True, file_types=["images"], placeholder="Enter your question or upload an image.", show_label=False)
290
 
 
9
  import os
10
  import spaces
11
  import base64
12
+ # import anyio
13
 
14
  # Setup logging
15
  logging.basicConfig(level=logging.DEBUG)
 
229
  image_created = {}
230
 
231
  @spaces.GPU(duration=120)
232
+ def bot_comms(message, history):
233
  """
234
  Handles communication between Gradio and the models.
235
  """
 
246
  gpt_outputs = []
247
  stream = generation(message, history)
248
 
249
+ # try:
250
+ # async for chunk in stream:
251
+ # if chunk is not None:
252
+ # if hasattr(chunk, 'choices') and chunk.choices:
253
+ # delta_content = chunk.choices[0].delta.get("content", "")
254
+ # if delta_content:
255
+ # gpt_outputs.append(delta_content)
256
+ # yield "".join(gpt_outputs)
257
+ # elif hasattr(chunk, 'image_url'):
258
+ # # Process image response
259
+ # image_url = chunk.image_url.get("url", "")
260
+ # gpt_outputs.append(f"[Image: {image_url}]")
261
+ # yield "".join(gpt_outputs)
262
+ # else:
263
+ # logger.debug("Chunk does not contain expected attributes.")
264
+ # else:
265
+ # logger.debug("Chunk is None.")
266
+ # except StopAsyncIteration:
267
+ # pass
268
  # gpt_outputs = []
269
  # # if mode == "chatting" or mode == "":
270
  # # logger.debug("On chatting or no mode.\n\n")
271
  # stream = generation(message, history)
272
  # logger.debug(f"\n\nOn chat mode for bot_comms right now.\n\n")
273
+ for chunk in stream:
274
+ if chunk is not None and hasattr(chunk, 'choices') and chunk.choices:
275
+ delta_content = chunk.choices[0].delta.get("content", "")
276
+ if delta_content:
277
+ gpt_outputs.append(delta_content)
278
+ yield "".join(gpt_outputs)
279
+
280
+ else:
281
+ logger.debug("Chunk does not contain 'choices' attribute or is None.")
282
+ # logger.debug("\n\nFound the chunk in stream for gpt-3.5\n\n")
283
+ # text = chunk.choices[0].delta.content
284
+ # if text:
285
+ # gpt_outputs.append(text)
286
+ # buffer += text
287
+ # yield "".join(gpt_outputs)
288
 
289
  chat_input = gr.MultimodalTextbox(interactive=True, file_types=["images"], placeholder="Enter your question or upload an image.", show_label=False)
290