removed the message['text'] from the input_prompt, as there is no multimodal input
app.py CHANGED
```diff
@@ -96,7 +96,7 @@ def multimodal_and_generation(message, history):
     image_path = hist[0][0]
 
     if image_path is None:
-        input_prompt = message["text"]
+        input_prompt = message
         client = openai.OpenAI(api_key=API_KEY)
         stream = client.chat.completions.create(
             model="gpt-3.5-turbo",
```
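For context: in the OpenAI v1 Python SDK, `client.chat.completions.create(..., stream=True)` returns an iterator of chunks whose `choices[0].delta.content` carries the incremental text, which is what `bot_comms` consumes further down. A minimal, self-contained sketch of this text-only branch that tolerates both payload shapes a `gr.MultimodalTextbox` can hand over (the helper name and environment-variable key handling are assumptions, not from app.py):

```python
import os
import openai

API_KEY = os.environ["OPENAI_API_KEY"]  # assumption: the real app loads its key elsewhere

def text_only_stream(message, history):
    # A MultimodalTextbox payload is a dict like {"text": ..., "files": [...]};
    # a plain string may also appear depending on how the handler is wired.
    input_prompt = message["text"] if isinstance(message, dict) else message
    client = openai.OpenAI(api_key=API_KEY)
    # stream=True returns an iterator of chunks with incremental deltas.
    return client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": input_prompt}],
        stream=True,
    )
```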
```diff
@@ -198,12 +198,12 @@ def bot_comms(message, history):
     # # logger.debug(f"\nimagery mode: {mode}\n")
     # return "Imagery On! Type your prompt to make the image 🖼️"
 
-    if message["text"] == "chatting":
-        logger.debug("Switching to chatting mode.")
-        # mode_manager.set_mode("chatting")
-        mode += "chatting"
-        # logger.debug(f"\nchatting mode: {mode}\n")
-        return "Imagery Off. Ask me any questions. ✌️"
+    # if message["text"] == "chatting":
+    #     logger.debug("Switching to chatting mode.")
+    #     # mode_manager.set_mode("chatting")
+    #     mode += "chatting"
+    #     # logger.debug(f"\nchatting mode: {mode}\n")
+    #     return "Imagery Off. Ask me any questions. ✌️"
 
     # if mode == "imagery":
     #     logger.debug("Processing imagery prompt.")
```
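One thing worth noting about the now-disabled block: `mode += "chatting"` appends rather than assigns, so after two switches `mode` holds `"chattingchatting"` and equality checks like `mode == "chatting"` stop matching. A minimal sketch of a toggle that avoids this (names are hypothetical, not from app.py):

```python
mode = ""  # module-level mode flag shared by the handlers

def switch_to_chatting() -> str:
    # Assign instead of appending so mode == "chatting" stays true
    # no matter how many times the user switches modes.
    global mode
    mode = "chatting"
    return "Imagery Off. Ask me any questions. ✌️"
```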
```diff
@@ -216,21 +216,21 @@ def bot_comms(message, history):
 
     buffer = ""
     gpt_outputs = []
-    if mode == "chatting" or mode == "":
-        logger.debug("On chatting or no mode.\n\n")
-        stream = multimodal_and_generation(message, history)
-        mode += "chatting"
-        for chunk in stream:
-            if chunk is not None and hasattr(chunk.choices[0].delta, "content"):
-                logger.debug("\n\nFound the chunk in stream for gpt-3.5\n\n")
-                text = chunk.choices[0].delta.content
-                if text:
-                    gpt_outputs.append(text)
-                    buffer += text
-                    yield "".join(gpt_outputs)
+    # if mode == "chatting" or mode == "":
+    #     logger.debug("On chatting or no mode.\n\n")
+    stream = multimodal_and_generation(message, history)
+    mode += "chatting"
+    for chunk in stream:
+        if chunk is not None and hasattr(chunk.choices[0].delta, "content"):
+            logger.debug("\n\nFound the chunk in stream for gpt-3.5\n\n")
+            text = chunk.choices[0].delta.content
+            if text:
+                gpt_outputs.append(text)
+                buffer += text
+                yield "".join(gpt_outputs)
 
 chatbot = gr.Chatbot(height=600, label="Chimera AI")
-chat_input = gr.MultimodalTextbox(interactive=True, file_types=["images"], placeholder="Enter your question or upload an image.", show_label=False)
+# chat_input = gr.MultimodalTextbox(interactive=True, file_types=["images"], placeholder="Enter your question or upload an image.", show_label=False)
 
 # with gr.Blocks(fill_height=True) as demo:
 #     gr.Markdown(DESCRIPTION)
```
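Because `bot_comms` yields the running `"".join(gpt_outputs)` on every chunk, it behaves as a generator that Gradio can stream into the chat window, replacing the in-progress assistant message as text arrives. A minimal sketch of how the `Chatbot` and the (here commented-out) `MultimodalTextbox` could be wired up with `gr.ChatInterface`; the commented `gr.Blocks` lines suggest the real app may use a Blocks layout instead, so this wiring is an assumption:

```python
import gradio as gr

from app import bot_comms  # assumption: the streaming handler shown in the diff

chatbot = gr.Chatbot(height=600, label="Chimera AI")
chat_input = gr.MultimodalTextbox(
    interactive=True,
    file_types=["images"],
    placeholder="Enter your question or upload an image.",
    show_label=False,
)

# With multimodal=True, ChatInterface passes {"text": ..., "files": [...]}
# dicts to fn, and streams each partial string a generator fn yields.
demo = gr.ChatInterface(
    fn=bot_comms,
    chatbot=chatbot,
    textbox=chat_input,
    multimodal=True,
)

if __name__ == "__main__":
    demo.launch()
```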