Added handling in multimodal_and_generation and bot_comms for the case where the incoming message is not a dict
Browse files
app.py
CHANGED
@@ -102,7 +102,7 @@ def multimodal_and_generation(message, history):
|
|
102 |
# image_path = hist[0][0]
|
103 |
|
104 |
# if image_path is None:
|
105 |
-
input_prompt = message
|
106 |
client = openai.OpenAI(api_key=API_KEY)
|
107 |
stream = client.chat.completions.create(
|
108 |
model="gpt-3.5-turbo",
|
@@ -167,9 +167,10 @@ def bot_comms(message, history):
|
|
167 |
"""
|
168 |
Handles communication between Gradio and the models.
|
169 |
"""
|
170 |
-
|
171 |
-
#
|
172 |
-
|
|
|
173 |
|
174 |
if message["text"] == "check cuda":
|
175 |
logger.debug("Checking CUDA availability.")
|
|
|
102 |
# image_path = hist[0][0]
|
103 |
|
104 |
# if image_path is None:
|
105 |
+
input_prompt = message if isinstance(message, str) else message.get("text", "")
|
106 |
client = openai.OpenAI(api_key=API_KEY)
|
107 |
stream = client.chat.completions.create(
|
108 |
model="gpt-3.5-turbo",
|
|
|
167 |
"""
|
168 |
Handles communication between Gradio and the models.
|
169 |
"""
|
170 |
+
|
171 |
+
# ensures message is a dictionary
|
172 |
+
if not isinstance(message, dict):
|
173 |
+
message = {"text": message}
|
174 |
|
175 |
if message["text"] == "check cuda":
|
176 |
logger.debug("Checking CUDA availability.")
|