placed custom_fn for chatbot to handle image outputs
app.py CHANGED
@@ -4,7 +4,7 @@ from transformers import TextIteratorStreamer, AutoProcessor, LlavaForConditiona
 from diffusers import DiffusionPipeline
 import gradio as gr
 import numpy as np
-from PIL import Image
+from PIL import Image, ImageDraw
 import threading
 import openai
 import os
@@ -109,6 +109,7 @@ def multimodal_and_generation(message, history):
     thread.start()

     return streamer
+

 def diffusing(prompt):
     """
@@ -127,7 +128,7 @@ def diffusing(prompt):
         image=image
     ).images[0]
     return image
-
+
 def check_cuda_availability():
     if torch.cuda.is_available():
         return f"GPU: {torch.cuda.get_device_name(0)}"
@@ -136,6 +137,30 @@ def check_cuda_availability():

 mode = ""

+# # Handling Gradio UI depending on mode
+# def mode_gradio(text: str):
+#     """
+#     Handles the interface gradio will output depending on what mode
+#     it's in. It stores the mode in a dictionary that was placed by
+#     a condition inside bot_comms and switches the behaviour in gradio.
+#     """
+
+#     # Mode active
+#     mode_selected = text
+#     return
+
+# # Image created from diffusing
+# image_created = {}
+
+# # Generate An Image from PIL Image grabbed from diffusing
+# def create_image(image):
+#     """
+#     Generates the image using the Image positions
+#     grabbed from PIL in the diffusing.
+#     """
+#     draw = ImageDraw.Draw(image)
+#     retu
+
 @spaces.GPU(duration=120)
 def bot_comms(message, history):
     """
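The commented-out create_image draft above breaks off mid-word at "retu". A minimal sketch of one way it could be completed with the newly imported ImageDraw; the caption parameter and the fixed text position are illustrative assumptions, not part of this commit:

from PIL import Image, ImageDraw

def create_image(image: Image.Image, caption: str = "") -> Image.Image:
    # Annotate the PIL image returned by diffusing(); drawing a text
    # caption onto it is one plausible reading of the draft's intent.
    draw = ImageDraw.Draw(image)
    if caption:
        draw.text((10, 10), caption, fill="white")
    return image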
@@ -166,7 +191,9 @@ def bot_comms(message, history):
         logger.debug("Processing imagery prompt.")
         message = message["text"]
         image = diffusing(message)
-
+        # mode_gradio("imagery")
+        # image_created["Image"] = image
+        yield ("Generated Image:", image)
         return

     buffer = ""
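The imagery branch now yields a (text, PIL.Image) tuple, which gr.ChatInterface does not render on its own; the custom_fn wrapper in the next hunk exists to translate it. Recent Gradio releases also accept a component yielded directly as the chat response, which would avoid the tuple altogether; a sketch under that assumption:

import gradio as gr
from PIL import Image

def imagery_reply(image: Image.Image):
    # Each yield from a ChatInterface fn replaces the bot's current
    # message, so yielding the component once displays the image itself.
    yield gr.Image(value=image)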
@@ -188,8 +215,16 @@ chat_input = gr.MultimodalTextbox(interactive=True, file_types=["images"], place

 with gr.Blocks(fill_height=True) as demo:
     gr.Markdown(DESCRIPTION)
+    # if mode_gradio == "imagery":
+    #     gr.Interface()
+    # Customize the ChatInterface to handle tuples
+    def custom_fn(*args, **kwargs):
+        result = bot_comms(*args, **kwargs)
+        if isinstance(result, tuple) and isinstance(result[1], Image.Image):
+            return [(result[0], None), (None, result[1])]
+        return result
     gr.ChatInterface(
-        fn=
+        fn=custom_fn,
         chatbot=chatbot,
         fill_height=True,
         multimodal=True,
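One caveat with the wrapper as committed: bot_comms uses yield, so bot_comms(*args, **kwargs) returns a generator object and the isinstance tuple check can never fire. A generator-aware sketch, assuming (as above) a Gradio version that accepts component values as chat responses:

import gradio as gr
from PIL import Image

def custom_fn(*args, **kwargs):
    # bot_comms is a generator function, so iterate over its output
    # and translate each chunk as it streams out.
    for result in bot_comms(*args, **kwargs):
        if isinstance(result, tuple) and isinstance(result[1], Image.Image):
            # Convert the ("Generated Image:", PIL.Image) tuple into a
            # component the chatbot can render.
            yield gr.Image(value=result[1])
        else:
            yield result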
|